| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
golismero/golismero-devel | tools/sqlmap/tamper/space2mysqlblank.py | 8 | 1878 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import random
from lib.core.common import singleTimeWarnMessage
from lib.core.enums import DBMS
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOW
def dependencies():
singleTimeWarnMessage("tamper script '%s' is only meant to be run against %s" % (os.path.basename(__file__).split(".")[0], DBMS.MYSQL))
def tamper(payload, **kwargs):
"""
Replaces space character (' ') with a random blank character from a
valid set of alternate characters
Requirement:
* MySQL
Tested against:
* MySQL 5.1
Notes:
* Useful to bypass several web application firewalls
>>> random.seed(0)
>>> tamper('SELECT id FROM users')
'SELECT%0Bid%0DFROM%0Cusers'
"""
# ASCII table:
# TAB 09 horizontal TAB
# LF 0A new line
# FF 0C new page
# CR 0D carriage return
# VT 0B vertical TAB (MySQL and Microsoft SQL Server only)
blanks = ('%09', '%0A', '%0C', '%0D', '%0B')
retVal = payload
if payload:
retVal = ""
quote, doublequote, firstspace = False, False, False
for i in xrange(len(payload)):
if not firstspace:
if payload[i].isspace():
firstspace = True
retVal += random.choice(blanks)
continue
elif payload[i] == '\'':
quote = not quote
elif payload[i] == '"':
doublequote = not doublequote
elif payload[i] == " " and not doublequote and not quote:
retVal += random.choice(blanks)
continue
retVal += payload[i]
return retVal
| gpl-2.0 |
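For readers outside the sqlmap tree, here is a minimal standalone sketch of the same space-substitution idea in Python 3. `space2mysqlblank` below is a hypothetical helper, not sqlmap's API; it keeps the quote tracking but drops the first-whitespace special case:

```python
import random

# URL-encoded characters MySQL accepts as whitespace (see the ASCII table above)
BLANKS = ('%09', '%0A', '%0C', '%0D', '%0B')

def space2mysqlblank(payload):
    """Replace each space outside quoted strings with a random blank escape."""
    out, in_single, in_double = [], False, False
    for ch in payload:
        if ch == "'":
            in_single = not in_single
        elif ch == '"':
            in_double = not in_double
        if ch == ' ' and not (in_single or in_double):
            out.append(random.choice(BLANKS))
        else:
            out.append(ch)
    return ''.join(out)

print(space2mysqlblank("SELECT id FROM users"))
# e.g. SELECT%0Bid%0DFROM%0Cusers -- the chosen blank varies per call
```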
ltilve/chromium | tools/multi_process_rss.py | 128 | 3646 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Counts the resident set size (RSS) of multiple processes without double-counting.
# If they share the same page frame, the page frame is counted only once.
#
# Usage:
# ./multi_process_rss.py <pid>|<pid>r [...]
#
# If <pid> has 'r' at the end, all descendants of the process are included.
#
# Example:
# ./multi_process_rss.py 12345 23456r
#
# The command line above counts the RSS of 1) process 12345, 2) process 23456
# and 3) all descendant processes of process 23456.
import collections
import logging
import os
import psutil
import sys
if sys.platform.startswith('linux'):
_TOOLS_PATH = os.path.dirname(os.path.abspath(__file__))
_TOOLS_LINUX_PATH = os.path.join(_TOOLS_PATH, 'linux')
sys.path.append(_TOOLS_LINUX_PATH)
import procfs # pylint: disable=F0401
class _NullHandler(logging.Handler):
def emit(self, record):
pass
_LOGGER = logging.getLogger('multi-process-rss')
_LOGGER.addHandler(_NullHandler())
def _recursive_get_children(pid):
try:
children = psutil.Process(pid).get_children()
except psutil.error.NoSuchProcess:
return []
descendant = []
for child in children:
descendant.append(child.pid)
descendant.extend(_recursive_get_children(child.pid))
return descendant
def list_pids(argv):
pids = []
for arg in argv[1:]:
try:
if arg.endswith('r'):
recursive = True
pid = int(arg[:-1])
else:
recursive = False
pid = int(arg)
except ValueError:
raise SyntaxError("%s is not an integer." % arg)
else:
pids.append(pid)
if recursive:
children = _recursive_get_children(pid)
pids.extend(children)
pids = sorted(set(pids), key=pids.index) # uniq: maybe slow, but simple.
return pids
def count_pageframes(pids):
pageframes = collections.defaultdict(int)
pagemap_dct = {}
for pid in pids:
maps = procfs.ProcMaps.load(pid)
if not maps:
_LOGGER.warning('/proc/%d/maps not found.' % pid)
continue
pagemap = procfs.ProcPagemap.load(pid, maps)
if not pagemap:
_LOGGER.warning('/proc/%d/pagemap not found.' % pid)
continue
pagemap_dct[pid] = pagemap
for pid, pagemap in pagemap_dct.iteritems():
for vma in pagemap.vma_internals.itervalues():
for pageframe, number in vma.pageframes.iteritems():
pageframes[pageframe] += number
return pageframes
def count_statm(pids):
resident = 0
shared = 0
private = 0
for pid in pids:
statm = procfs.ProcStatm.load(pid)
if not statm:
_LOGGER.warning('/proc/%d/statm not found.' % pid)
continue
resident += statm.resident
shared += statm.share
private += (statm.resident - statm.share)
return (resident, shared, private)
def main(argv):
logging_handler = logging.StreamHandler()
logging_handler.setLevel(logging.WARNING)
logging_handler.setFormatter(logging.Formatter(
'%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
_LOGGER.setLevel(logging.WARNING)
_LOGGER.addHandler(logging_handler)
if sys.platform.startswith('linux'):
logging.getLogger('procfs').setLevel(logging.WARNING)
logging.getLogger('procfs').addHandler(logging_handler)
pids = list_pids(argv)
pageframes = count_pageframes(pids)
else:
_LOGGER.error('%s is not supported.' % sys.platform)
return 1
# TODO(dmikurube): Classify this total RSS.
print len(pageframes) * 4096
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
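The no-double-count idea behind the script is easiest to see with toy data. A hedged illustration follows; the page-frame numbers are made up, whereas the real tool derives them from /proc/<pid>/pagemap via the procfs helper:

```python
PAGE_SIZE = 4096  # bytes; matches the constant used in main() above

# pid -> set of physical page frame numbers backing its resident pages
# (hypothetical values; frame 0x2000 is shared between the two processes)
resident_pfns = {
    101: {0x1000, 0x1001, 0x2000},
    102: {0x2000, 0x3000},
}

naive_rss = sum(len(pfns) for pfns in resident_pfns.values()) * PAGE_SIZE
deduped_rss = len(set().union(*resident_pfns.values())) * PAGE_SIZE

print(naive_rss)    # 20480 -- the shared frame is counted twice
print(deduped_rss)  # 16384 -- each physical frame counted once
```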
bdelzell/creditcoin-org-creditcoin | qa/rpc-tests/mempool_limit.py | 22 | 2177 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test mempool limiting together/eviction with the wallet
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0", "-debug"]))
self.is_network_split = False
self.sync_all()
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.txouts = gen_return_txouts()
def run_test(self):
txids = []
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90)
#create a mempool tx that will be evicted
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(self.relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (4):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee)
# by now, the tx should be evicted, check confirmation state
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
MempoolLimitTest().main()
| mit |
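The behaviour the test leans on — a size-capped mempool evicting the cheapest transactions first — can be sketched as below. This is a simplified illustration, not Bitcoin Core's actual eviction code, which also weighs descendant packages:

```python
def evict_to_limit(mempool, max_bytes):
    """mempool: list of (txid, size_bytes, fee_satoshis); returns kept txids."""
    # Keep the best fee-rate transactions until the size budget is spent.
    by_feerate = sorted(mempool, key=lambda tx: tx[2] / tx[1], reverse=True)
    kept, used = [], 0
    for txid, size, fee in by_feerate:
        if used + size <= max_bytes:
            kept.append(txid)
            used += size
    return kept

pool = [("low", 500, 100), ("mid", 500, 500), ("high", 500, 2000)]
print(evict_to_limit(pool, 1000))  # ['high', 'mid'] -- 'low' is evicted
```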
Gaia3D/QGIS | python/plugins/processing/algs/lidar/fusion/TinSurfaceCreate.py | 7 | 3452 | # -*- coding: utf-8 -*-
"""
***************************************************************************
TinSurfaceCreate.py
---------------------
Date : June 2014
Copyright : (C) 2014 by Agresta S. Coop
Email : iescamochero at agresta dot org
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Agresta S. Coop - www.agresta.org'
__date__ = 'June 2014'
__copyright__ = '(C) 2014, Agresta S. Coop'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputFile
from FusionAlgorithm import FusionAlgorithm
from FusionUtils import FusionUtils
from processing.core.parameters import ParameterString
class TinSurfaceCreate(FusionAlgorithm):
INPUT = 'INPUT'
OUTPUT_DTM = 'OUTPUT_DTM'
CELLSIZE = 'CELLSIZE'
XYUNITS = 'XYUNITS'
ZUNITS = 'ZUNITS'
UNITS = ['Meter', 'Feet']
CLASS = 'CLASS'
def defineCharacteristics(self):
self.name = 'Tin Surface Create'
self.group = 'Surface'
self.addParameter(ParameterFile(
self.INPUT, self.tr('Input las layer')))
self.addParameter(ParameterNumber(self.CELLSIZE,
self.tr('Cellsize'), 0, None, 10.0))
self.addParameter(ParameterSelection(self.XYUNITS,
self.tr('XY Units'), self.UNITS))
self.addParameter(ParameterSelection(self.ZUNITS,
self.tr('Z Units'), self.UNITS))
self.addOutput(OutputFile(self.OUTPUT_DTM,
self.tr('DTM Output Surface'), 'dtm'))
class_var = ParameterString(self.CLASS,
self.tr('Class'), 2, False, True)
class_var.isAdvanced = True
self.addParameter(class_var)
def processAlgorithm(self, progress):
commands = [os.path.join(FusionUtils.FusionPath(), 'TINSurfaceCreate.exe')]
commands.append('/verbose')
class_var = self.getParameterValue(self.CLASS)
if str(class_var).strip() != '':
commands.append('/class:' + str(class_var))
commands.append(self.getOutputValue(self.OUTPUT_DTM))
commands.append(str(self.getParameterValue(self.CELLSIZE)))
commands.append(self.UNITS[self.getParameterValue(self.XYUNITS)][0])
commands.append(self.UNITS[self.getParameterValue(self.ZUNITS)][0])
commands.append('0')
commands.append('0')
commands.append('0')
commands.append('0')
files = self.getParameterValue(self.INPUT).split(';')
if len(files) == 1:
commands.append(self.getParameterValue(self.INPUT))
else:
commands.extend(files)
FusionUtils.runFusion(commands, progress)
| gpl-2.0 |
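For reference, the command list that `processAlgorithm` assembles has roughly the shape below. The paths and values are hypothetical, and the four literal '0' arguments appear to be coordinate-system/zone/datum placeholders in FUSION's positional syntax:

```python
# Illustration only -- hypothetical values, not output captured from the class.
commands = [
    'C:/FUSION/TINSurfaceCreate.exe',
    '/verbose',
    '/class:2',               # optional class filter (advanced parameter)
    'C:/out/surface.dtm',     # OUTPUT_DTM
    '10.0',                   # CELLSIZE
    'M',                      # first letter of 'Meter' -> XY units
    'M',                      # Z units
    '0', '0', '0', '0',       # coord system / zone / datum placeholders
    'C:/data/tile1.las',      # input LAS file(s)
]
print(' '.join(commands))
```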
ZuluPro/libcloud | libcloud/test/test_logging_connection.py | 12 | 2769 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from io import StringIO
import zlib
import requests_mock
import libcloud
from libcloud.test import unittest
from libcloud.common.base import Connection
from libcloud.http import LibcloudConnection
from libcloud.utils.loggingconnection import LoggingConnection
class TestLoggingConnection(unittest.TestCase):
def tearDown(self):
Connection.conn_class = LibcloudConnection
def test_debug_method_uses_log_class(self):
with StringIO() as fh:
libcloud.enable_debug(fh)
conn = Connection(timeout=10)
conn.connect()
self.assertTrue(isinstance(conn.connection, LoggingConnection))
def test_debug_log_class_handles_request(self):
with StringIO() as fh:
libcloud.enable_debug(fh)
conn = Connection(url='http://test.com/')
conn.connect()
self.assertEqual(conn.connection.host, 'http://test.com')
with requests_mock.mock() as m:
m.get('http://test.com/test', text='data')
conn.request('/test')
log = fh.getvalue()
self.assertTrue(isinstance(conn.connection, LoggingConnection))
self.assertIn('-i -X GET', log)
self.assertIn('data', log)
def test_debug_log_class_handles_request_with_compression(self):
request = zlib.compress(b'data')
with StringIO() as fh:
libcloud.enable_debug(fh)
conn = Connection(url='http://test.com/')
conn.connect()
self.assertEqual(conn.connection.host, 'http://test.com')
with requests_mock.mock() as m:
m.get('http://test.com/test', content=request,
headers={'content-encoding': 'zlib'})
conn.request('/test')
log = fh.getvalue()
self.assertTrue(isinstance(conn.connection, LoggingConnection))
self.assertIn('-i -X GET', log)
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
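A usage sketch of the feature under test: `libcloud.enable_debug()` swaps in `LoggingConnection` so every request and response is mirrored to the given sink, which is exactly what the assertions above inspect:

```python
import io

import libcloud
from libcloud.common.base import Connection

sink = io.StringIO()
libcloud.enable_debug(sink)                  # installs LoggingConnection
conn = Connection(url='http://example.com/')
conn.connect()
# After conn.request('/path'), sink.getvalue() holds a curl-style
# transcript ('-i -X GET ...') plus the decoded response body.
```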
pelya/commandergenius | project/jni/python/src/Lib/encodings/cp737.py | 593 | 34937 | """ Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp737',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x008b: 0x039c, # GREEK CAPITAL LETTER MU
0x008c: 0x039d, # GREEK CAPITAL LETTER NU
0x008d: 0x039e, # GREEK CAPITAL LETTER XI
0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
0x0099: 0x03b2, # GREEK SMALL LETTER BETA
0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
0x009e: 0x03b7, # GREEK SMALL LETTER ETA
0x009f: 0x03b8, # GREEK SMALL LETTER THETA
0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00a3: 0x03bc, # GREEK SMALL LETTER MU
0x00a4: 0x03bd, # GREEK SMALL LETTER NU
0x00a5: 0x03be, # GREEK SMALL LETTER XI
0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00a7: 0x03c0, # GREEK SMALL LETTER PI
0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
0x00af: 0x03c8, # GREEK SMALL LETTER PSI
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
u'\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
u'\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
u'\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
u'\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
u'\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
u'\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
u'\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
u'\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
u'\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
u'\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
u'\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
u'\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
u'\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
u'\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
u'\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
u'\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b7: 0x00fa, # MIDDLE DOT
0x00f7: 0x00f6, # DIVISION SIGN
0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x0081, # GREEK CAPITAL LETTER BETA
0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA
0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA
0x0397: 0x0086, # GREEK CAPITAL LETTER ETA
0x0398: 0x0087, # GREEK CAPITAL LETTER THETA
0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA
0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x008b, # GREEK CAPITAL LETTER MU
0x039d: 0x008c, # GREEK CAPITAL LETTER NU
0x039e: 0x008d, # GREEK CAPITAL LETTER XI
0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x008f, # GREEK CAPITAL LETTER PI
0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO
0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU
0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI
0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI
0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI
0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA
0x03b2: 0x0099, # GREEK SMALL LETTER BETA
0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA
0x03b4: 0x009b, # GREEK SMALL LETTER DELTA
0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON
0x03b6: 0x009d, # GREEK SMALL LETTER ZETA
0x03b7: 0x009e, # GREEK SMALL LETTER ETA
0x03b8: 0x009f, # GREEK SMALL LETTER THETA
0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA
0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00a3, # GREEK SMALL LETTER MU
0x03bd: 0x00a4, # GREEK SMALL LETTER NU
0x03be: 0x00a5, # GREEK SMALL LETTER XI
0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00a7, # GREEK SMALL LETTER PI
0x03c1: 0x00a8, # GREEK SMALL LETTER RHO
0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ab, # GREEK SMALL LETTER TAU
0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00ad, # GREEK SMALL LETTER PHI
0x03c7: 0x00ae, # GREEK SMALL LETTER CHI
0x03c8: 0x00af, # GREEK SMALL LETTER PSI
0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| lgpl-2.1 |
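Since this module lives in the standard library's `encodings` package, the codec is reachable by name. A quick round-trip sketch (shown in Python 3, where cp737 also ships in the stdlib):

```python
# Greek capitals Alpha/Beta/Gamma map to bytes 0x80-0x82 per the tables above.
text = '\u0391\u0392\u0393'
raw = text.encode('cp737')
print(raw)                           # b'\x80\x81\x82'
print(raw.decode('cp737') == text)   # True
```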
xtech9/spksrc | spk/subliminal/src/app/application/auth.py | 42 | 2064 | from collections import namedtuple
from flask import abort, request
from functools import wraps, partial
from subprocess import check_output
import grp
import os
import pwd
__all__ = ['authenticate', 'requires_auth']
def authenticate():
"""Authenticate a user using Synology's authenticate.cgi
If the user is authenticated, returns a namedtuple with the
username and its groups, if not returns None. For example::
>>> authenticate()
User(name='admin', groups=set(['administrators']))
:rtype: namedtuple or None
"""
User = namedtuple('User', ['name', 'groups'])
with open(os.devnull, 'w') as devnull:
user = check_output(['/usr/syno/synoman/webman/modules/authenticate.cgi'], stderr=devnull).strip()
if not user:
return None
groups = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
groups.append(grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name)
return User(user, set(groups))
def requires_auth(f=None, groups=None, users=None):
"""Require a user to be authenticated. If he is not, this aborts
on 403.
The condition to be authorized is for the user to be authenticated
and in one of the listed groups (if any) or one of the listed users
(if any)
:param function f: the decorated function
:param list groups: groups whitelist
:param list users: users whitelist
"""
if f is None:
return partial(requires_auth, groups=groups, users=users)
@wraps(f)
def decorated(*args, **kwargs):
user = authenticate()
if user is None: # Not authenticated
abort(403)
# A user is authorized if he is in the groups whitelist or the users whitelist
authorized = False
if groups is not None and len(set(groups) & user.groups) > 0: # Authorized group
authorized = True
if users is not None and user.name in users: # Authorized user
authorized = True
if not authorized:
abort(403)
return f(*args, **kwargs)
return decorated
| bsd-3-clause |
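A hypothetical usage sketch of the decorator above in a Flask app; the route paths, group name, and import path are illustrative assumptions:

```python
from flask import Flask

from application.auth import requires_auth  # assumed import path

app = Flask(__name__)

@app.route('/admin')
@requires_auth(groups=['administrators'])   # group whitelist
def admin_panel():
    return 'administrators only'

@app.route('/reports')
@requires_auth(users=['alice'], groups=['power_users'])  # either list grants access
def reports():
    return 'reports'
```

Note that, as written, calling the decorator with neither `groups` nor `users` leaves `authorized` False for every request, so at least one whitelist must be supplied.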
kevintaw/django | tests/postgres_tests/test_ranges.py | 161 | 24567 | import datetime
import json
import unittest
from django import forms
from django.core import exceptions, serializers
from django.db import connection
from django.db.models import F
from django.test import TestCase, override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import RangeLookupsModel, RangesModel
try:
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django.contrib.postgres import fields as pg_fields, forms as pg_forms
from django.contrib.postgres.validators import (
RangeMaxValueValidator, RangeMinValueValidator,
)
except ImportError:
pass
def skipUnlessPG92(test):
try:
PG_VERSION = connection.pg_version
except AttributeError:
PG_VERSION = 0
if PG_VERSION < 90200:
return unittest.skip('PostgreSQL >= 9.2 required')(test)
return test
@skipUnlessPG92
class TestSaveLoad(TestCase):
def test_all_fields(self):
now = timezone.now()
instance = RangesModel(
ints=NumericRange(0, 10),
bigints=NumericRange(10, 20),
floats=NumericRange(20, 30),
timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now),
dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()),
)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(instance.ints, loaded.ints)
self.assertEqual(instance.bigints, loaded.bigints)
self.assertEqual(instance.floats, loaded.floats)
self.assertEqual(instance.timestamps, loaded.timestamps)
self.assertEqual(instance.dates, loaded.dates)
def test_range_object(self):
r = NumericRange(0, 10)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_tuple(self):
instance = RangesModel(ints=(0, 10))
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(NumericRange(0, 10), loaded.ints)
def test_range_object_boundaries(self):
r = NumericRange(0, 10, '[]')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
self.assertTrue(10 in loaded.floats)
def test_unbounded(self):
r = NumericRange(None, None, '()')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
def test_empty(self):
r = NumericRange(empty=True)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_null(self):
instance = RangesModel(ints=None)
instance.save()
loaded = RangesModel.objects.get()
self.assertIsNone(loaded.ints)
@skipUnlessPG92
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
RangesModel.objects.create(ints=NumericRange(0, 10)),
RangesModel.objects.create(ints=NumericRange(5, 15)),
RangesModel.objects.create(ints=NumericRange(None, 0)),
RangesModel.objects.create(ints=NumericRange(empty=True)),
RangesModel.objects.create(ints=None),
]
def test_exact(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__exact=NumericRange(0, 10)),
[self.objs[0]],
)
def test_isnull(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isnull=True),
[self.objs[4]],
)
def test_isempty(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isempty=True),
[self.objs[3]],
)
def test_contains(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=8),
[self.objs[0], self.objs[1]],
)
def test_contains_range(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=NumericRange(3, 8)),
[self.objs[0]],
)
def test_contained_by(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)),
[self.objs[0], self.objs[1], self.objs[3]],
)
def test_overlap(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)),
[self.objs[0], self.objs[1]],
)
def test_fully_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)),
[self.objs[2]],
)
def test_fully_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)),
[],
)
def test_not_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)),
[self.objs[1]],
)
def test_not_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)),
[self.objs[0], self.objs[2]],
)
def test_adjacent_to(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)),
[self.objs[1], self.objs[2]],
)
def test_startswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith=0),
[self.objs[0]],
)
def test_endswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__endswith=0),
[self.objs[2]],
)
def test_startswith_chaining(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith__gte=0),
[self.objs[0], self.objs[1]],
)
@skipUnlessPG92
class TestQueryingWithRanges(TestCase):
def test_date_range(self):
objs = [
RangeLookupsModel.objects.create(date='2015-01-01'),
RangeLookupsModel.objects.create(date='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_date_range_datetime_field(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01'),
RangeLookupsModel.objects.create(timestamp='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(timestamp__date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_datetime_range(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01T09:00:00'),
RangeLookupsModel.objects.create(timestamp='2015-05-05T17:00:00'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
timestamp__contained_by=DateTimeTZRange('2015-01-01T09:00', '2015-05-04T23:55')
),
[objs[0]],
)
def test_integer_range(self):
objs = [
RangeLookupsModel.objects.create(integer=5),
RangeLookupsModel.objects.create(integer=99),
RangeLookupsModel.objects.create(integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_biginteger_range(self):
objs = [
RangeLookupsModel.objects.create(big_integer=5),
RangeLookupsModel.objects.create(big_integer=99),
RangeLookupsModel.objects.create(big_integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(big_integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_float_range(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_f_ranges(self):
parent = RangesModel.objects.create(floats=NumericRange(0, 10))
objs = [
RangeLookupsModel.objects.create(float=5, parent=parent),
RangeLookupsModel.objects.create(float=99, parent=parent),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=F('parent__floats')),
[objs[0]]
)
def test_exclude(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)),
[objs[2]]
)
@skipUnlessPG92
class TestSerialization(TestCase):
test_data = (
'[{"fields": {"ints": "{\\"upper\\": \\"10\\", \\"lower\\": \\"0\\", '
'\\"bounds\\": \\"[)\\"}", "floats": "{\\"empty\\": true}", '
'"bigints": null, "timestamps": "{\\"upper\\": \\"2014-02-02T12:12:12+00:00\\", '
'\\"lower\\": \\"2014-01-01T00:00:00+00:00\\", \\"bounds\\": \\"[)\\"}", '
'"dates": "{\\"upper\\": \\"2014-02-02\\", \\"lower\\": \\"2014-01-01\\", \\"bounds\\": \\"[)\\"}" }, '
'"model": "postgres_tests.rangesmodel", "pk": null}]'
)
lower_date = datetime.date(2014, 1, 1)
upper_date = datetime.date(2014, 2, 2)
lower_dt = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
upper_dt = datetime.datetime(2014, 2, 2, 12, 12, 12, tzinfo=timezone.utc)
def test_dumping(self):
instance = RangesModel(ints=NumericRange(0, 10), floats=NumericRange(empty=True),
timestamps=DateTimeTZRange(self.lower_dt, self.upper_dt),
dates=DateRange(self.lower_date, self.upper_date))
data = serializers.serialize('json', [instance])
dumped = json.loads(data)
for field in ('ints', 'dates', 'timestamps'):
dumped[0]['fields'][field] = json.loads(dumped[0]['fields'][field])
check = json.loads(self.test_data)
for field in ('ints', 'dates', 'timestamps'):
check[0]['fields'][field] = json.loads(check[0]['fields'][field])
self.assertEqual(dumped, check)
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.ints, NumericRange(0, 10))
self.assertEqual(instance.floats, NumericRange(empty=True))
self.assertEqual(instance.bigints, None)
class TestValidators(PostgreSQLTestCase):
def test_max(self):
validator = RangeMaxValueValidator(5)
validator(NumericRange(0, 5))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely less than or equal to 5.')
self.assertEqual(cm.exception.code, 'max_value')
def test_min(self):
validator = RangeMinValueValidator(5)
validator(NumericRange(10, 15))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely greater than or equal to 5.')
self.assertEqual(cm.exception.code, 'min_value')
class TestFormField(PostgreSQLTestCase):
def test_valid_integer(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['1', '2'])
self.assertEqual(value, NumericRange(1, 2))
def test_valid_floats(self):
field = pg_forms.FloatRangeField()
value = field.clean(['1.12345', '2.001'])
self.assertEqual(value, NumericRange(1.12345, 2.001))
def test_valid_timestamps(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12'])
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(value, DateTimeTZRange(lower, upper))
def test_valid_dates(self):
field = pg_forms.DateRangeField()
value = field.clean(['01/01/2014', '02/02/2014'])
lower = datetime.date(2014, 1, 1)
upper = datetime.date(2014, 2, 2)
self.assertEqual(value, DateRange(lower, upper))
def test_using_split_datetime_widget(self):
class SplitDateTimeRangeField(pg_forms.DateTimeRangeField):
base_field = forms.SplitDateTimeField
class SplitForm(forms.Form):
field = SplitDateTimeRangeField()
form = SplitForm()
self.assertHTMLEqual(str(form), '''
<tr>
<th>
<label for="id_field_0">Field:</label>
</th>
<td>
<input id="id_field_0_0" name="field_0_0" type="text" />
<input id="id_field_0_1" name="field_0_1" type="text" />
<input id="id_field_1_0" name="field_1_0" type="text" />
<input id="id_field_1_1" name="field_1_1" type="text" />
</td>
</tr>
''')
form = SplitForm({
'field_0_0': '01/01/2014',
'field_0_1': '00:00:00',
'field_1_0': '02/02/2014',
'field_1_1': '12:12:12',
})
self.assertTrue(form.is_valid())
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper))
def test_none(self):
field = pg_forms.IntegerRangeField(required=False)
value = field.clean(['', ''])
self.assertEqual(value, None)
def test_rendering(self):
class RangeForm(forms.Form):
ints = pg_forms.IntegerRangeField()
self.assertHTMLEqual(str(RangeForm()), '''
<tr>
<th><label for="id_ints_0">Ints:</label></th>
<td>
<input id="id_ints_0" name="ints_0" type="number" />
<input id="id_ints_1" name="ints_1" type="number" />
</td>
</tr>
''')
def test_integer_lower_bound_higher(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['10', '2'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_integer_open(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['', '0'])
self.assertEqual(value, NumericRange(None, 0))
def test_integer_incorrect_data_type(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_integer_invalid_lower(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_invalid_upper(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_required(self):
field = pg_forms.IntegerRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean([1, ''])
self.assertEqual(value, NumericRange(1, None))
def test_float_lower_bound_higher(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.8', '1.6'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_float_open(self):
field = pg_forms.FloatRangeField()
value = field.clean(['', '3.1415926'])
self.assertEqual(value, NumericRange(None, 3.1415926))
def test_float_incorrect_data_type(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1.6')
self.assertEqual(cm.exception.messages[0], 'Enter two numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_float_invalid_lower(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '3.1415926'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_invalid_upper(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.61803399', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_required(self):
field = pg_forms.FloatRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1.61803399', ''])
self.assertEqual(value, NumericRange(1.61803399, None))
def test_date_lower_bound_higher(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', '1976-04-16'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_date_open(self):
field = pg_forms.DateRangeField()
value = field.clean(['', '2013-04-09'])
self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9)))
def test_date_incorrect_data_type(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.')
self.assertEqual(cm.exception.code, 'invalid')
def test_date_invalid_lower(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2013-04-09'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_invalid_upper(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_required(self):
field = pg_forms.DateRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1976-04-16', ''])
self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None))
def test_datetime_lower_bound_higher(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2006-10-25 14:59', '2006-10-25 14:58'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_datetime_open(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['', '2013-04-09 11:45'])
self.assertEqual(value, DateTimeTZRange(None, datetime.datetime(2013, 4, 9, 11, 45)))
def test_datetime_incorrect_data_type(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('2013-04-09 11:45')
self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.')
self.assertEqual(cm.exception.code, 'invalid')
def test_datetime_invalid_lower(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['45', '2013-04-09 11:45'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_invalid_upper(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09 11:45', 'sweet pickles'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_required(self):
field = pg_forms.DateTimeRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['2013-04-09 11:45', ''])
self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None))
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Johannesburg')
def test_datetime_prepare_value(self):
field = pg_forms.DateTimeRangeField()
value = field.prepare_value(
DateTimeTZRange(datetime.datetime(2015, 5, 22, 16, 6, 33, tzinfo=timezone.utc), None)
)
self.assertEqual(value, [datetime.datetime(2015, 5, 22, 18, 6, 33), None])
def test_model_field_formfield_integer(self):
model_field = pg_fields.IntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_biginteger(self):
model_field = pg_fields.BigIntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_float(self):
model_field = pg_fields.FloatRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.FloatRangeField)
def test_model_field_formfield_date(self):
model_field = pg_fields.DateRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateRangeField)
def test_model_field_formfield_datetime(self):
model_field = pg_fields.DateTimeRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateTimeRangeField)
class TestWidget(PostgreSQLTestCase):
def test_range_widget(self):
f = pg_forms.ranges.DateTimeRangeField()
self.assertHTMLEqual(
f.widget.render('datetimerange', ''),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
self.assertHTMLEqual(
f.widget.render('datetimerange', None),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
dt_range = DateTimeTZRange(
datetime.datetime(2006, 1, 10, 7, 30),
datetime.datetime(2006, 2, 12, 9, 50)
)
self.assertHTMLEqual(
f.widget.render('datetimerange', dt_range),
'<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00" /><input type="text" name="datetimerange_1" value="2006-02-12 09:50:00" />'
)
| bsd-3-clause |
Big-B702/python-for-android | python3-alpha/python3-src/Lib/importlib/test/test_api.py | 46 | 2976 | from . import util
import imp
import importlib
import sys
import unittest
class ImportModuleTests(unittest.TestCase):
"""Test importlib.import_module."""
def test_module_import(self):
# Test importing a top-level module.
with util.mock_modules('top_level') as mock:
with util.import_state(meta_path=[mock]):
module = importlib.import_module('top_level')
self.assertEqual(module.__name__, 'top_level')
def test_absolute_package_import(self):
# Test importing a module from a package with an absolute name.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
name = '{0}.mod'.format(pkg_name)
with util.mock_modules(pkg_long_name, name) as mock:
with util.import_state(meta_path=[mock]):
module = importlib.import_module(name)
self.assertEqual(module.__name__, name)
def test_shallow_relative_package_import(self):
# Test importing a module from a package through a relative import.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
module_name = 'mod'
absolute_name = '{0}.{1}'.format(pkg_name, module_name)
relative_name = '.{0}'.format(module_name)
with util.mock_modules(pkg_long_name, absolute_name) as mock:
with util.import_state(meta_path=[mock]):
importlib.import_module(pkg_name)
module = importlib.import_module(relative_name, pkg_name)
self.assertEqual(module.__name__, absolute_name)
def test_deep_relative_package_import(self):
modules = ['a.__init__', 'a.b.__init__', 'a.c']
with util.mock_modules(*modules) as mock:
with util.import_state(meta_path=[mock]):
importlib.import_module('a')
importlib.import_module('a.b')
module = importlib.import_module('..c', 'a.b')
self.assertEqual(module.__name__, 'a.c')
def test_absolute_import_with_package(self):
# Test importing a module from a package with an absolute name with
# the 'package' argument given.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
name = '{0}.mod'.format(pkg_name)
with util.mock_modules(pkg_long_name, name) as mock:
with util.import_state(meta_path=[mock]):
importlib.import_module(pkg_name)
module = importlib.import_module(name, pkg_name)
self.assertEqual(module.__name__, name)
def test_relative_import_wo_package(self):
# Relative imports cannot happen without the 'package' argument being
# set.
with self.assertRaises(TypeError):
importlib.import_module('.support')
def test_main():
from test.support import run_unittest
run_unittest(ImportModuleTests)
if __name__ == '__main__':
test_main()
| apache-2.0 |
axelkennedal/dissen | dissenEnv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/escsm.py | 2930 | 7839 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
HZ_cls = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
HZ_st = (
eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17
5,eError, 6,eError, 5, 5, 4,eError,# 18-1f
4,eError, 4, 4, 4,eError, 4,eError,# 20-27
4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)
HZCharLenTable = (0, 0, 0, 0, 0, 0)
HZSMModel = {'classTable': HZ_cls,
'classFactor': 6,
'stateTable': HZ_st,
'charLenTable': HZCharLenTable,
'name': "HZ-GB-2312"}
ISO2022CN_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022CN_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
5, 6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
'classFactor': 9,
'stateTable': ISO2022CN_st,
'charLenTable': ISO2022CNCharLenTable,
'name': "ISO-2022-CN"}
ISO2022JP_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022JP_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError, 5,eError,eError,eError, 4,eError,eError,# 20-27
eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)
ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
'classFactor': 10,
'stateTable': ISO2022JP_st,
'charLenTable': ISO2022JPCharLenTable,
'name': "ISO-2022-JP"}
ISO2022KR_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022KR_st = (
eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17
eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)
ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)
ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
'classFactor': 6,
'stateTable': ISO2022KR_st,
'charLenTable': ISO2022KRCharLenTable,
'name': "ISO-2022-KR"}
# flake8: noqa
| mit |
TylerB24890/flask | tests/test_appctx.py | 141 | 4034 | # -*- coding: utf-8 -*-
"""
tests.appctx
~~~~~~~~~~~~
Tests the application context.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
def test_basic_url_generation():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
app.config['PREFERRED_URL_SCHEME'] = 'https'
@app.route('/')
def index():
pass
with app.app_context():
rv = flask.url_for('index')
assert rv == 'https://localhost/'
def test_url_generation_requires_server_name():
app = flask.Flask(__name__)
with app.app_context():
with pytest.raises(RuntimeError):
flask.url_for('index')
def test_url_generation_without_context_fails():
with pytest.raises(RuntimeError):
flask.url_for('index')
def test_request_context_means_app_context():
app = flask.Flask(__name__)
with app.test_request_context():
assert flask.current_app._get_current_object() == app
assert flask._app_ctx_stack.top is None
def test_app_context_provides_current_app():
app = flask.Flask(__name__)
with app.app_context():
assert flask.current_app._get_current_object() == app
assert flask._app_ctx_stack.top is None
def test_app_tearing_down():
cleanup_stuff = []
app = flask.Flask(__name__)
@app.teardown_appcontext
def cleanup(exception):
cleanup_stuff.append(exception)
with app.app_context():
pass
assert cleanup_stuff == [None]
def test_app_tearing_down_with_previous_exception():
cleanup_stuff = []
app = flask.Flask(__name__)
@app.teardown_appcontext
def cleanup(exception):
cleanup_stuff.append(exception)
try:
raise Exception('dummy')
except Exception:
pass
with app.app_context():
pass
assert cleanup_stuff == [None]
def test_app_tearing_down_with_handled_exception():
cleanup_stuff = []
app = flask.Flask(__name__)
@app.teardown_appcontext
def cleanup(exception):
cleanup_stuff.append(exception)
with app.app_context():
try:
raise Exception('dummy')
except Exception:
pass
assert cleanup_stuff == [None]
def test_app_ctx_globals_methods():
app = flask.Flask(__name__)
with app.app_context():
# get
assert flask.g.get('foo') is None
assert flask.g.get('foo', 'bar') == 'bar'
# __contains__
assert 'foo' not in flask.g
flask.g.foo = 'bar'
assert 'foo' in flask.g
# setdefault
flask.g.setdefault('bar', 'the cake is a lie')
flask.g.setdefault('bar', 'hello world')
assert flask.g.bar == 'the cake is a lie'
# pop
assert flask.g.pop('bar') == 'the cake is a lie'
with pytest.raises(KeyError):
flask.g.pop('bar')
assert flask.g.pop('bar', 'more cake') == 'more cake'
# __iter__
assert list(flask.g) == ['foo']
def test_custom_app_ctx_globals_class():
class CustomRequestGlobals(object):
def __init__(self):
self.spam = 'eggs'
app = flask.Flask(__name__)
app.app_ctx_globals_class = CustomRequestGlobals
with app.app_context():
assert flask.render_template_string('{{ g.spam }}') == 'eggs'
def test_context_refcounts():
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_req(error=None):
called.append('request')
@app.teardown_appcontext
def teardown_app(error=None):
called.append('app')
@app.route('/')
def index():
with flask._app_ctx_stack.top:
with flask._request_ctx_stack.top:
pass
env = flask._request_ctx_stack.top.request.environ
assert env['werkzeug.request'] is not None
return u''
c = app.test_client()
res = c.get('/')
assert res.status_code == 200
assert res.data == b''
assert called == ['request', 'app']
| bsd-3-clause |
pnasrat/puppet-codereview | rietveld.py | 1 | 6634 | #!/usr/bin/python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to simplify some common AppEngine actions.
Use 'rietveld help' for a list of commands.
"""
import logging
import os
import re
import shutil
import subprocess
import sys
import zipfile
APPCFG = 'appcfg.py'
DEV_APPSERVER = 'dev_appserver.py'
RELEASE = 'release'
ZIPFILE = 'django.zip'
FILES = ["app.yaml", "index.yaml",
"__init__.py", "main.py", "settings.py", "urls.py"]
DIRS = ["static", "templates", "codereview"]
IGNORED_DIR = (".svn", "gis", "admin", "localflavor", "mysql", "mysql_old",
"oracle", "postgresql", "postgresql_psycopg2", "sqlite3",
"test")
IGNORED_EXT = (".pyc", ".pyo", ".po", ".mo")
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShell(command, print_output=False):
"""Executes a command and returns the output."""
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=use_shell)
output = ""
while True:
line = p.stdout.readline()
if not line:
break
if print_output:
print line.strip('\n')
output += line
p.wait()
p.stdout.close()
return output
def Help():
print "Available commands:"
print "help"
print "release"
print "serve"
print "serve_email"
print "serve_remote"
print "serve_remote_email"
print "update"
print "update_indexes"
print "upload"
print "vacuum_indexes"
def CreateRelease():
""" Creates a "release" subdirectory.
This is a subdirectory containing a bunch of symlinks, from which the app can
be updated. The main reason for this is to import Django from a zipfile,
which saves dramatically in upload time: statting and computing the SHA1 for
1000s of files is slow. Even if most of those files don't actually need to
be uploaded, they still add to the work done for each update.
"""
def GetDjangoFiles():
"""Return a list of Django files to send to the server.
We prune:
- .svn subdirectories for obvious reasons.
- the other directories are huge and unneeded.
- *.po and *.mo files because they are bulky and unneeded.
- *.pyc and *.pyo because they aren't used by App Engine anyway.
"""
result = []
for root, dirs, files in os.walk("django"):
dirs[:] = [d for d in dirs if d not in IGNORED_DIR]
for file in files:
unused, extension = os.path.splitext(file)
if extension in IGNORED_EXT:
continue
result.append(os.path.join(root, file))
return result
def CopyRietveldDirectory(src, dst):
"""Copies a directory used by Rietveld.
Skips ".svn" directories and ".pyc" files.
"""
for root, dirs, files in os.walk(src):
if not os.path.exists(os.path.join(dst, root)):
os.mkdir(os.path.join(dst, root))
for file in files:
unused, extension = os.path.splitext(file)
if extension in (".pyc", ".pyo"):
continue
shutil.copyfile(os.path.join(root, file), os.path.join(dst, root, file))
      dirs[:] = [d for d in dirs if d not in (".svn",)]
for dir in dirs:
os.mkdir(os.path.join(dst, root, dir))
# Remove old ZIPFILE file.
if os.path.exists(ZIPFILE):
os.remove(ZIPFILE)
django_files = GetDjangoFiles()
django_zip = zipfile.ZipFile(ZIPFILE, "w")
for file in django_files:
django_zip.write(file, compress_type=zipfile.ZIP_DEFLATED)
django_zip.close()
# Remove old RELEASE directory.
if sys.platform.startswith("win"):
RunShell(["rmdir", "/s", "/q", RELEASE])
else:
RunShell(["rm", "-rf", RELEASE])
# Create new RELEASE directory.
os.mkdir(RELEASE)
if sys.platform.startswith("win"):
# No symbolic links on Windows, just copy.
for x in FILES + [ZIPFILE]:
shutil.copyfile(x, os.path.join(RELEASE, x))
for x in DIRS:
CopyRietveldDirectory(x, RELEASE)
else:
# Create symbolic links.
for x in FILES + DIRS + [ZIPFILE]:
RunShell(["ln", "-s", "../" + x, os.path.join(RELEASE, x)])
def GetApplicationName():
file = open("app.yaml", "r")
result = file.read()
file.close()
APP_REGEXP = ".*?application: ([\w\-]+)"
return re.compile(APP_REGEXP, re.DOTALL).match(result).group(1)
def Update(args):
print "Updating " + GetApplicationName()
output = RunShell(["svn", "info"])
revision = re.compile(".*?\nRevision: (\d+)",
re.DOTALL).match(output).group(1)
revision_file = os.path.join("templates", "live_revision.html")
file = open(revision_file, "w")
file.write('This is <a class="novisit" '
'href="http://code.google.com/p/rietveld/">Rietveld</a> r' +
revision)
file.close()
CreateRelease()
appcfg_args = [APPCFG, "update", RELEASE] + args
# Use os.system here because input might be required, and that doesn't work
# through subprocess.Popen.
os.system(" ".join(appcfg_args))
RunShell(["svn", "revert", revision_file])
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
Help()
return 0
command = argv[1]
if command == "help":
Help()
elif command == "serve":
RunShell([DEV_APPSERVER, "."], True)
elif command == "serve_remote":
RunShell([DEV_APPSERVER, "--address", "0.0.0.0", "."], True)
elif command == "serve_email":
RunShell([DEV_APPSERVER, "--enable_sendmail", "."], True)
elif command == "serve_remote_email":
RunShell([DEV_APPSERVER, "--enable_sendmail", "--address", "0.0.0.0", "."],
True)
elif command == "release":
CreateRelease()
elif command in ("update", "upload"):
Update(argv[2:])
elif command == "update_indexes":
RunShell([APPCFG, "update_indexes", "."], True)
elif command == "vacuum_indexes":
RunShell([APPCFG, "vacuum_indexes", "."], True)
else:
print "Unknown command: " + command
return 2
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
xxxIsaacPeralxxx/anim-studio-tools | review_tool/sources/reviewTool/util/diagnosis.py | 5 | 3430 | ##
# \namespace reviewTool.util.diagnosis
#
# \remarks Creates diagnosis information about data
#
# \author eric.hulser@drdstudios.com
# \author Dr. D Studios
# \date 08/16/11
#
import ConfigParser
import os
import subprocess
import sys
import time
from reviewTool import settings
from PyQt4.QtCore import QThread
class DiagnosisThread(QThread):
def __init__( self, collection ):
super(DiagnosisThread,self).__init__()
self.collection = collection
def run( self ):
# save the inputed collection to disk
parser = ConfigParser.ConfigParser()
for key, entries in self.collection.items():
parser.add_section(key)
for i, entry in enumerate(entries):
option = 'entry%i' % i
parser.set(key,option, '%s|%s' % entry)
# save the data to file
tempFile = settings.tempPath( 'comparison_config.ini' )
f = open( tempFile, 'w' )
parser.write( f )
f.close()
# run the comparison in a separate process
proc = subprocess.Popen( 'python2.5 %s %s' % (__file__,tempFile), shell = True )
proc.communicate()
# generateReport(filename = tempFile)
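# Illustrative usage sketch, not part of the original module: `collection`
# maps a section name to a list of 2-tuples, which run() serializes as
# "first|second" pairs. The section and entry values below are hypothetical.
#
#     thread = DiagnosisThread({'sc_010': [('old', '/tmp/a'), ('new', '/tmp/b')]})
#     thread.start()  # writes comparison_config.ini, then spawns the compare process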
#--------------------------------------------------------------------------------
def generateReport( filename = '', collection = {} ):
import bkdDiagn.sg_bkdDiagn
# load the data from file
if ( filename ):
parser = ConfigParser.ConfigParser()
parser.read(filename)
# create the collection
collection = {}
for section in parser.sections():
data = []
for option in parser.options(section):
value = parser.get(section,option)
result = value.split('|')
if ( len(result) != 2 ):
continue
data.append( result )
collection[section] = data
if ( not collection ):
return False
# process the results
result = bkdDiagn.sg_bkdDiagn.batch_process2(collection.items())
html_file_name = time.ctime().replace( ' ', '_' ).replace( ':', '-' ) + '.html'
html_file_path = settings.desktopPath( 'breakdown_diagnosis' )
if ( not os.path.exists( html_file_path ) ):
os.mkdir( html_file_path )
filename = os.path.join( html_file_path, html_file_name )
# save the html data to file
f = open( filename, 'w' )
f.write( result )
f.close()
return True
if ( __name__ == '__main__' ):
generateReport( filename = sys.argv[1] )
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
| gpl-3.0 |
F8LEFT/ART | KDE/share/ECM/find-modules/rules_engine.py | 1 | 21090 | #!/usr/bin/env python
#=============================================================================
# Copyright 2016 by Shaheed Haque (srhaque@theiet.org)
# Copyright 2016 Stephen Kelly <steveire@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#=============================================================================
"""SIP file generation rules engine."""
from __future__ import print_function
from abc import *
import argparse
import gettext
import inspect
import logging
import os
import re
import sys
import textwrap
import traceback
from copy import deepcopy
from clang.cindex import CursorKind
from clang.cindex import AccessSpecifier
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
logger = logging.getLogger(__name__)
gettext.install(__name__)
_SEPARATOR = "\x00"
def _parents(container):
parents = []
parent = container.semantic_parent
while parent and parent.kind != CursorKind.TRANSLATION_UNIT:
parents.append(parent.spelling)
parent = parent.semantic_parent
if parents:
parents = "::".join(reversed(parents))
else:
parents = os.path.basename(container.translation_unit.spelling)
return parents
class Rule(object):
def __init__(self, db, rule_number, fn, pattern_zip):
self.db = db
self.rule_number = rule_number
self.fn = fn
self.usage = 0
try:
groups = ["(?P<{}>{})".format(name, pattern) for pattern, name in pattern_zip]
groups = _SEPARATOR.join(groups)
self.matcher = re.compile(groups)
except Exception as e:
groups = ["{} '{}'".format(name, pattern) for pattern, name in pattern_zip]
groups = ", ".join(groups)
raise RuntimeError(_("Bad {}: {}: {}").format(self, groups, e))
def match(self, candidate):
return self.matcher.match(candidate)
def trace_result(self, parents, item, original, modified):
fqn = parents + "::" + original["name"] + "[" + str(item.extent.start.line) + "]"
self._trace_result(fqn, original, modified)
def _trace_result(self, fqn, original, modified):
if not modified["name"]:
logger.debug(_("Rule {} suppressed {}, {}").format(self, fqn, original))
else:
delta = False
for k, v in original.iteritems():
if v != modified[k]:
delta = True
break
if delta:
logger.debug(_("Rule {} modified {}, {}->{}").format(self, fqn, original, modified))
else:
logger.warn(_("Rule {} did not modify {}, {}").format(self, fqn, original))
def __str__(self):
return "[{},{}]".format(self.rule_number, self.fn.__name__)
class AbstractCompiledRuleDb(object):
__metaclass__ = ABCMeta
def __init__(self, db, parameter_names):
self.db = db
self.compiled_rules = []
for i, raw_rule in enumerate(db()):
if len(raw_rule) != len(parameter_names) + 1:
raise RuntimeError(_("Bad raw rule {}: {}: {}").format(db.__name__, raw_rule, parameter_names))
z = zip(raw_rule[:-1], parameter_names)
self.compiled_rules.append(Rule(db, i, raw_rule[-1], z))
self.candidate_formatter = _SEPARATOR.join(["{}"] * len(parameter_names))
def _match(self, *args):
candidate = self.candidate_formatter.format(*args)
for rule in self.compiled_rules:
matcher = rule.match(candidate)
if matcher:
#
# Only use the first matching rule.
#
rule.usage += 1
return matcher, rule
return None, None
@abstractmethod
def apply(self, *args):
raise NotImplemented(_("Missing subclass"))
def dump_usage(self, fn):
""" Dump the usage counts."""
for rule in self.compiled_rules:
fn(self.__class__.__name__, str(rule), rule.usage)
class ContainerRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR CONTAINERS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any container (class, namespace, struct, union) to be
customised, for example to add SIP compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the container.
1. A regular expression which matches the container name.
2. A regular expression which matches any template parameters.
3. A regular expression which matches the container declaration.
4. A regular expression which matches any base specifiers.
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def container_xxx(container, sip, matcher):
'''
Return a modified declaration for the given container.
:param container: The clang.cindex.Cursor for the container.
:param sip: A dict with the following keys:
name The name of the container.
template_parameters Any template parameters.
decl The declaration.
base_specifiers Any base specifiers.
body The body, less the outer
pair of braces.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT body and annotations.
:return: An updated set of sip.xxx values. Setting sip.name to the
empty string will cause the container to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(ContainerRuleDb, self).__init__(db, ["parents", "container", "template_parameters", "decl", "base_specifiers"])
def apply(self, container, sip):
"""
Walk over the rules database for functions, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param sip: The SIP dict.
"""
parents = _parents(container)
matcher, rule = self._match(parents, sip["name"], sip["template_parameters"], sip["decl"], sip["base_specifiers"])
if matcher:
before = deepcopy(sip)
rule.fn(container, sip, matcher)
rule.trace_result(parents, container, before, sip)
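# Illustrative example, not part of the original engine: a raw rule database of
# the shape ContainerRuleDb expects. The "FooPrivate" pattern is hypothetical;
# container_discard is the helper defined further down in this module.
def _example_container_rules():
    # parents, container, template parameters, decl, base specifiers, handler
    return [
        [".*", "FooPrivate", ".*", ".*", ".*", container_discard],
    ]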
class FunctionRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR FUNCTIONS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any function to be customised, for example to add SIP
compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the function.
1. A regular expression which matches the function name.
2. A regular expression which matches any template parameters.
3. A regular expression which matches the function result.
4. A regular expression which matches the function parameters (e.g.
"int a, void *b" for "int foo(int a, void *b)").
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def function_xxx(container, function, sip, matcher):
'''
Return a modified declaration for the given function.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param sip: A dict with the following keys:
name The name of the function.
template_parameters Any template parameters.
fn_result Result, if not a constructor.
decl The declaration.
                    prefix              Leading keywords ("static"). Separated by space,
ends with a space.
suffix Trailing keywords ("const"). Separated by space, starts with
space.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
:return: An updated set of sip.xxx values. Setting sip.name to the
                 empty string will cause the function to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(FunctionRuleDb, self).__init__(db, ["container", "function", "template_parameters", "fn_result", "parameters"])
def apply(self, container, function, sip):
"""
Walk over the rules database for functions, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param sip: The SIP dict.
"""
parents = _parents(function)
matcher, rule = self._match(parents, sip["name"], ", ".join(sip["template_parameters"]), sip["fn_result"], ", ".join(sip["parameters"]))
if matcher:
before = deepcopy(sip)
rule.fn(container, function, sip, matcher)
rule.trace_result(parents, function, before, sip)
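# Illustrative example, not part of the original engine: a raw rule database of
# the shape FunctionRuleDb expects, suppressing assignment operators with the
# function_discard helper defined further down in this module.
def _example_function_rules():
    # container, function, template parameters, fn result, parameters, handler
    return [
        [".*", "operator=", ".*", ".*", ".*", function_discard],
    ]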
class ParameterRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR FUNCTION PARAMETERS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any parameter in any function to be customised, for
example to add SIP compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the function enclosing the parameter.
1. A regular expression which matches the function name enclosing the
parameter.
2. A regular expression which matches the parameter name.
3. A regular expression which matches the parameter declaration (e.g.
"int foo").
4. A regular expression which matches the parameter initialiser (e.g.
"Xyz:MYCONST + 42").
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def parameter_xxx(container, function, parameter, sip, init, matcher):
'''
Return a modified declaration and initialiser for the given parameter.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param parameter: The clang.cindex.Cursor for the parameter.
:param sip: A dict with the following keys:
name The name of the function.
decl The declaration.
init Any initialiser.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
:return: An updated set of sip.xxx values.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(ParameterRuleDb, self).__init__(db, ["container", "function", "parameter", "decl", "init"])
def apply(self, container, function, parameter, sip):
"""
Walk over the rules database for parameters, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param parameter: The clang.cindex.Cursor for the parameter.
:param sip: The SIP dict.
"""
parents = _parents(function)
matcher, rule = self._match(parents, function.spelling, sip["name"], sip["decl"], sip["init"])
if matcher:
before = deepcopy(sip)
rule.fn(container, function, parameter, sip, matcher)
rule.trace_result(parents, parameter, before, sip)
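# Illustrative example, not part of the original engine: a raw rule database of
# the shape ParameterRuleDb expects, reusing the mode_t rewrite helper defined
# further down in this module.
def _example_parameter_rules():
    # container, function, parameter, decl, init, handler
    return [
        [".*", ".*", ".*", "mode_t.*", ".*", param_rewrite_mode_t_as_int],
    ]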
class VariableRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR VARIABLES.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any variable to be customised, for example to add SIP
compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the variable.
1. A regular expression which matches the variable name.
2. A regular expression which matches the variable declaration (e.g.
"int foo").
3. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def variable_xxx(container, variable, sip, matcher):
'''
Return a modified declaration for the given variable.
:param container: The clang.cindex.Cursor for the container.
:param variable: The clang.cindex.Cursor for the variable.
:param sip: A dict with the following keys:
name The name of the variable.
decl The declaration.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
:return: An updated set of sip.xxx values. Setting sip.name to the
                 empty string will cause the variable to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(VariableRuleDb, self).__init__(db, ["container", "variable", "decl"])
def apply(self, container, variable, sip):
"""
Walk over the rules database for variables, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param variable: The clang.cindex.Cursor for the variable.
:param sip: The SIP dict.
"""
parents = _parents(variable)
matcher, rule = self._match(parents, sip["name"], sip["decl"])
if matcher:
before = deepcopy(sip)
rule.fn(container, variable, sip, matcher)
rule.trace_result(parents, variable, before, sip)
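# Illustrative example, not part of the original engine: a raw rule database of
# the shape VariableRuleDb expects; "d_ptr" is a hypothetical private member to
# suppress with the variable_discard helper defined further down in this module.
def _example_variable_rules():
    # container, variable, decl, handler
    return [
        [".*", "d_ptr", ".*", variable_discard],
    ]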
class RuleSet(object):
"""
To implement your own binding, create a subclass of RuleSet, also called
RuleSet in your own Python module. Your subclass will expose the raw rules
    along with other ancillary data exposed through the subclass methods.
You then simply run the SIP generation and SIP compilation programs passing
    in the name of your rules file.
"""
__metaclass__ = ABCMeta
@abstractmethod
def container_rules(self):
"""
Return a compiled list of rules for containers.
:return: A ContainerRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def function_rules(self):
"""
Return a compiled list of rules for functions.
:return: A FunctionRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def parameter_rules(self):
"""
Return a compiled list of rules for function parameters.
:return: A ParameterRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def variable_rules(self):
"""
Return a compiled list of rules for variables.
:return: A VariableRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
def dump_unused(self):
"""Usage statistics, to identify unused rules."""
def dumper(db_name, rule, usage):
if usage:
logger.info(_("Rule {}::{} used {} times".format(db_name, rule, usage)))
else:
logger.warn(_("Rule {}::{} unused".format(db_name, rule)))
for db in [self.container_rules(), self.function_rules(), self.parameter_rules(),
self.variable_rules()]:
db.dump_usage(dumper)
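# Illustrative sketch, not part of the original engine: the minimal shape of a
# RuleSet subclass that a project rules file would expose, wiring the example
# databases above into the four required accessors.
class _ExampleRuleSet(RuleSet):
    def __init__(self):
        self._container_db = ContainerRuleDb(_example_container_rules)
        self._function_db = FunctionRuleDb(_example_function_rules)
        self._parameter_db = ParameterRuleDb(_example_parameter_rules)
        self._variable_db = VariableRuleDb(_example_variable_rules)
    def container_rules(self):
        return self._container_db
    def function_rules(self):
        return self._function_db
    def parameter_rules(self):
        return self._parameter_db
    def variable_rules(self):
        return self._variable_db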
def container_discard(container, sip, matcher):
sip["name"] = ""
def function_discard(container, function, sip, matcher):
sip["name"] = ""
def parameter_transfer_to_parent(container, function, parameter, sip, matcher):
if function.is_static_method():
sip["annotations"].add("Transfer")
else:
sip["annotations"].add("TransferThis")
def param_rewrite_mode_t_as_int(container, function, parameter, sip, matcher):
sip["decl"] = sip["decl"].replace("mode_t", "unsigned int")
def return_rewrite_mode_t_as_int(container, function, sip, matcher):
sip["fn_result"] = "unsigned int"
def variable_discard(container, variable, sip, matcher):
sip["name"] = ""
def parameter_strip_class_enum(container, function, parameter, sip, matcher):
sip["decl"] = sip["decl"].replace("class ", "").replace("enum ", "")
def function_discard_impl(container, function, sip, matcher):
if function.extent.start.column == 1:
sip["name"] = ""
def rules(project_rules):
"""
    Load the given project rules file and return the RuleSet instance it defines.
:param project_rules: The rules file for the project.
"""
import imp
imp.load_source("project_rules", project_rules)
#
# Statically prepare the rule logic. This takes the rules provided by the user and turns them into code.
#
return getattr(sys.modules["project_rules"], "RuleSet")()
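# Illustrative usage, not part of the original engine; the rules-file path is
# hypothetical:
#
#     rule_set = rules("/path/to/project_rules.py")
#     rule_set.container_rules().apply(container, sip)
#     rule_set.dump_unused()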
| gpl-3.0 |
tmhorne/simplewiki | tests/calendar.py | 4 | 7375 | # -*- coding: utf-8 -*-
# Copyright 2008,2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
from __future__ import with_statement
import tests
from datetime import date as dateclass
import zim.datetimetz
from zim.plugins import PluginManager
from zim.notebook import Path
from zim.templates import get_template
from zim.formats import get_dumper
from zim.plugins.calendar import NotebookExtension, \
MainWindowExtensionDialog, MainWindowExtensionEmbedded, \
CalendarDialog
from tests.gui import setupGtkInterface
class TestCalendarFunctions(tests.TestCase):
def testDatesForWeeks(self):
from zim.plugins.calendar import dates_for_week
zim.datetimetz.FIRST_DAY_OF_WEEK = \
zim.datetimetz.MONDAY
start, end = dates_for_week(2012, 17)
self.assertEqual(start, dateclass(2012, 4, 23)) # a monday
self.assertEqual(end, dateclass(2012, 4, 29)) # a sunday
zim.datetimetz.FIRST_DAY_OF_WEEK = \
zim.datetimetz.SUNDAY
start, end = dates_for_week(2012, 17)
		self.assertEqual(start, dateclass(2012, 4, 22)) # a sunday
self.assertEqual(end, dateclass(2012, 4, 28)) # a saturday
start, end = dates_for_week(2013, 1)
		self.assertEqual(start, dateclass(2012, 12, 30)) # a sunday
self.assertEqual(end, dateclass(2013, 1, 5)) # a saturday
start, end = dates_for_week(2009, 53)
		self.assertEqual(start, dateclass(2009, 12, 27)) # a sunday
self.assertEqual(end, dateclass(2010, 1, 2)) # a saturday
def testWeekCalendar(self):
from zim.plugins.calendar import weekcalendar
		sunday = dateclass(2012, 4, 22)
monday = dateclass(2012, 4, 23)
nextsunday = dateclass(2012, 4, 29)
zim.datetimetz.FIRST_DAY_OF_WEEK = \
zim.datetimetz.MONDAY
self.assertEqual(weekcalendar(sunday), (2012, 16, 7))
self.assertEqual(weekcalendar(monday), (2012, 17, 1))
self.assertEqual(weekcalendar(nextsunday), (2012, 17, 7))
zim.datetimetz.FIRST_DAY_OF_WEEK = \
zim.datetimetz.SUNDAY
self.assertEqual(weekcalendar(sunday), (2012, 17, 1))
self.assertEqual(weekcalendar(monday), (2012, 17, 2))
self.assertEqual(weekcalendar(nextsunday), (2012, 18, 1))
dec31 = dateclass(2012, 12, 31)
jan1 = dateclass(2013, 1, 1)
self.assertEqual(weekcalendar(dec31), (2013, 1, 2))
self.assertEqual(weekcalendar(jan1), (2013, 1, 3))
dec31 = dateclass(2009, 12, 31)
jan1 = dateclass(2010, 1, 1)
self.assertEqual(weekcalendar(dec31), (2009, 53, 5))
self.assertEqual(weekcalendar(jan1), (2009, 53, 6))
def testDateRangeFromPath(self):
from zim.plugins.calendar import daterange_from_path
# Day
for path in (Path('Foo:2012:04:27'), Path('Foo:2012:4:27')):
type, start, end = daterange_from_path(path)
self.assertEqual(type, 'day')
self.assertEqual(start, dateclass(2012, 4, 27))
self.assertEqual(end, dateclass(2012, 4, 27))
# Week
zim.datetimetz.FIRST_DAY_OF_WEEK = \
zim.datetimetz.MONDAY
type, start, end = daterange_from_path(Path('Foo:2012:Week 17'))
self.assertEqual(type, 'week')
self.assertEqual(start, dateclass(2012, 4, 23)) # a monday
self.assertEqual(end, dateclass(2012, 4, 29)) # a sunday
# Month
for path in (Path('Foo:2012:04'), Path('Foo:2012:4')):
type, start, end = daterange_from_path(path)
self.assertEqual(type, 'month')
self.assertEqual(start, dateclass(2012, 4, 1))
self.assertEqual(end, dateclass(2012, 4, 30))
# Year
type, start, end = daterange_from_path(Path('Foo:2012'))
self.assertEqual(type, 'year')
self.assertEqual(start, dateclass(2012, 1, 1))
self.assertEqual(end, dateclass(2012, 12, 31))
@tests.slowTest
class TestCalendarPlugin(tests.TestCase):
def testMainWindowExtensions(self):
pluginklass = PluginManager.get_plugin_class('calendar')
plugin = pluginklass()
notebook = tests.new_notebook(self.get_tmp_name())
ui = setupGtkInterface(self, notebook=notebook)
plugin.preferences['embedded'] = True
self.assertEqual(plugin.extension_classes['MainWindow'], MainWindowExtensionEmbedded)
plugin.extend(ui.mainwindow)
ext = list(plugin.extensions)
self.assertEqual(len(ext), 1)
self.assertIsInstance(ext[0], MainWindowExtensionEmbedded)
plugin.preferences.changed() # make sure no errors are triggered
ext[0].go_page_today()
self.assertTrue(ui.page.name.startswith('Journal:'))
plugin.preferences['embedded'] = False
self.assertEqual(plugin.extension_classes['MainWindow'], MainWindowExtensionDialog)
plugin.extend(ui.mainwindow) # plugin does not remember objects, manager does that
ext = list(plugin.extensions)
self.assertEqual(len(ext), 1)
self.assertIsInstance(ext[0], MainWindowExtensionDialog)
plugin.preferences.changed() # make sure no errors are triggered
def test_dialog(dialog):
self.assertIsInstance(dialog, CalendarDialog)
dialog.do_today('xxx')
ui.open_page(Path('foo'))
with tests.DialogContext(test_dialog):
ext[0].show_calendar()
plugin.preferences['embedded'] = True # switch back
def testNotebookExtension(self):
pluginklass = PluginManager.get_plugin_class('calendar')
plugin = pluginklass()
notebook = tests.new_notebook(self.get_tmp_name())
plugin.extend(notebook)
ext = list(plugin.extensions)
self.assertEqual(len(ext), 1)
self.assertIsInstance(ext[0], NotebookExtension)
page = Path('Foo')
link = notebook.suggest_link(page, '2014-01-06')
self.assertEqual(link.name, 'Journal:2014:01:06')
link = notebook.suggest_link(page, 'foo')
self.assertIsNone(link)
def testNamespace(self):
pluginklass = PluginManager.get_plugin_class('calendar')
plugin = pluginklass()
today = dateclass.today()
for namespace in (Path('Calendar'), Path(':')):
plugin.preferences['namespace'] = namespace
path = plugin.path_from_date(today)
self.assertTrue(isinstance(path, Path))
self.assertTrue(path.ischild(namespace))
date = plugin.date_from_path(path)
self.assertTrue(isinstance(date, dateclass))
self.assertEqual(date, today)
from zim.plugins.calendar import DAY, WEEK, MONTH, YEAR
zim.datetimetz.FIRST_DAY_OF_WEEK = \
zim.datetimetz.MONDAY
plugin.preferences['namespace'] = Path('Calendar')
date = dateclass(2012, 4, 27)
for setting, wanted, start in (
(DAY, 'Calendar:2012:04:27', dateclass(2012, 4, 27)),
(WEEK, 'Calendar:2012:Week 17', dateclass(2012, 4, 23)),
(MONTH, 'Calendar:2012:04', dateclass(2012, 4, 1)),
(YEAR, 'Calendar:2012', dateclass(2012, 1, 1)),
):
plugin.preferences['granularity'] = setting
path = plugin.path_from_date(date)
self.assertEqual(path.name, wanted)
self.assertEqual(plugin.date_from_path(path), start)
path = plugin.path_for_month_from_date(date)
self.assertEqual(path.name, 'Calendar:2012:04')
def testTemplate(self):
pluginklass = PluginManager.get_plugin_class('calendar')
plugin = pluginklass()
plugin.preferences['namespace'] = Path('Calendar')
notebook = tests.new_notebook()
plugin.extend(notebook)
dumper = get_dumper('wiki')
zim.datetimetz.FIRST_DAY_OF_WEEK = \
zim.datetimetz.MONDAY
for path in (
Path('Calendar:2012'),
Path('Calendar:2012:04:27'),
Path('Calendar:2012:Week 17'),
Path('Calendar:2012:04'),
):
tree = notebook.get_template(path)
lines = dumper.dump(tree)
#~ print lines
self.assertTrue(not 'Created' in ''.join(lines)) # No fall back
if 'Week' in path.name:
days = [l for l in lines if l.startswith('=== ')]
self.assertEqual(len(days), 7)
| gpl-2.0 |
ampax/edx-platform | common/djangoapps/edxmako/shortcuts.py | 7 | 5575 | # Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.template import Context
from django.http import HttpResponse
import logging
from microsite_configuration import microsite
from edxmako import lookup_template
from edxmako.middleware import get_template_request_context
from django.conf import settings
from django.core.urlresolvers import reverse
log = logging.getLogger(__name__)
def marketing_link(name):
"""Returns the correct URL for a link to the marketing site
    depending on whether the marketing site is enabled.
    Since the marketing site is enabled by a setting, we have two
    possible URLs for certain links. This function decides
    which URL should be provided.
"""
# link_map maps URLs from the marketing site to the old equivalent on
# the Django site
link_map = settings.MKTG_URL_LINK_MAP
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site and name in settings.MKTG_URLS:
# special case for when we only want the root marketing URL
if name == 'ROOT':
return settings.MKTG_URLS.get('ROOT')
return settings.MKTG_URLS.get('ROOT') + settings.MKTG_URLS.get(name)
# only link to the old pages when the marketing site isn't on
elif not enable_mktg_site and name in link_map:
# don't try to reverse disabled marketing links
if link_map[name] is not None:
return reverse(link_map[name])
else:
log.debug("Cannot find corresponding link for name: %s", name)
return '#'
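# Illustrative usage, not part of the original module; 'FAQ' is a hypothetical
# key that would have to exist in MKTG_URLS or MKTG_URL_LINK_MAP:
#
#     url = marketing_link('FAQ')    # marketing-site URL, reverse()d view, or '#'
#     root = marketing_link('ROOT')  # bare marketing root when the site is enabled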
def is_any_marketing_link_set(names):
"""
Returns a boolean if any given named marketing links are configured.
"""
return any(is_marketing_link_set(name) for name in names)
def is_marketing_link_set(name):
"""
Returns a boolean if a given named marketing link is configured.
"""
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site:
return name in settings.MKTG_URLS
else:
return name in settings.MKTG_URL_LINK_MAP
def marketing_link_context_processor(request):
"""
A django context processor to give templates access to marketing URLs
Returns a dict whose keys are the marketing link names usable with the
marketing_link method (e.g. 'ROOT', 'CONTACT', etc.) prefixed with
'MKTG_URL_' and whose values are the corresponding URLs as computed by the
marketing_link method.
"""
return dict(
[
("MKTG_URL_" + k, marketing_link(k))
for k in (
settings.MKTG_URL_LINK_MAP.viewkeys() |
settings.MKTG_URLS.viewkeys()
)
]
)
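# Illustrative note, not part of the original module: with a hypothetical
# MKTG_URLS = {'ROOT': 'https://marketing.example.com'}, templates rendered
# through this processor would see MKTG_URL_ROOT resolved via marketing_link.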
def microsite_footer_context_processor(request):
"""
Checks the site name to determine whether to use the edX.org footer or the Open Source Footer.
"""
return dict(
[
("IS_REQUEST_IN_MICROSITE", microsite.is_request_in_microsite())
]
)
def render_to_string(template_name, dictionary, context=None, namespace='main'):
# see if there is an override template defined in the microsite
template_name = microsite.get_template_path(template_name)
context_instance = Context(dictionary)
# add dictionary to context_instance
context_instance.update(dictionary or {})
# collapse context_instance to a single dictionary for mako
context_dictionary = {}
context_instance['settings'] = settings
context_instance['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
context_instance['marketing_link'] = marketing_link
context_instance['is_any_marketing_link_set'] = is_any_marketing_link_set
context_instance['is_marketing_link_set'] = is_marketing_link_set
# In various testing contexts, there might not be a current request context.
request_context = get_template_request_context()
if request_context:
for item in request_context:
context_dictionary.update(item)
for item in context_instance:
context_dictionary.update(item)
if context:
context_dictionary.update(context)
# "Fix" CSRF token by evaluating the lazy object
KEY_CSRF_TOKENS = ('csrf_token', 'csrf')
for key in KEY_CSRF_TOKENS:
if key in context_dictionary:
context_dictionary[key] = unicode(context_dictionary[key])
# fetch and render template
template = lookup_template(namespace, template_name)
return template.render_unicode(**context_dictionary)
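# Example usage (a sketch; the template name and context key are hypothetical):
#   html = render_to_string('courseware/course_about.html', {'course': course})
# Mako receives the collapsed context as keyword arguments, so the template can
# reference `course`, `settings`, `marketing_link`, etc. directly.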
def render_to_response(template_name, dictionary=None, context_instance=None, namespace='main', **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
lookup.get_template(args[0]).render with the passed arguments.
"""
dictionary = dictionary or {}
return HttpResponse(render_to_string(template_name, dictionary, context_instance, namespace), **kwargs)
| agpl-3.0 |
st0ne/gr-OE2AIP | docs/doxygen/doxyxml/doxyindex.py | 223 | 6551 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.
"""
import os
from generated import index
from base import Base
from text import description
class DoxyIndex(Base):
"""
Parses a doxygen xml directory.
"""
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyIndex, self)._parse()
self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
for mem in self._root.compound:
converted = self.convert_mem(mem)
# For files we want the contents to be accessible directly
# from the parent rather than having to go through the file
# object.
if self.get_cls(mem) == DoxyFile:
if mem.name.endswith('.h'):
self._members += converted.members()
self._members.append(converted)
else:
self._members.append(converted)
def generate_swig_doc_i(self):
"""
%feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
"""
pass
class DoxyCompMem(Base):
kind = None
def __init__(self, *args, **kwargs):
super(DoxyCompMem, self).__init__(*args, **kwargs)
@classmethod
def can_parse(cls, obj):
return obj.kind == cls.kind
def set_descriptions(self, parse_data):
bd = description(getattr(parse_data, 'briefdescription', None))
dd = description(getattr(parse_data, 'detaileddescription', None))
self._data['brief_description'] = bd
self._data['detailed_description'] = dd
class DoxyCompound(DoxyCompMem):
pass
class DoxyMember(DoxyCompMem):
pass
class DoxyFunction(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'function'
def _parse(self):
if self._parsed:
return
super(DoxyFunction, self)._parse()
self.set_descriptions(self._parse_data)
self._data['params'] = []
prms = self._parse_data.param
for prm in prms:
self._data['params'].append(DoxyParam(prm))
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyFunction)
class DoxyParam(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyParam, self)._parse()
self.set_descriptions(self._parse_data)
self._data['declname'] = self._parse_data.declname
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
declname = property(lambda self: self.data()['declname'])
class DoxyClass(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'class'
def _parse(self):
if self._parsed:
return
super(DoxyClass, self)._parse()
self.retrieve_data()
if self._error:
return
self.set_descriptions(self._retrieved_data.compounddef)
        # Sectiondef.kind tells whether members are private or public.
# We just ignore this for now.
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyClass)
class DoxyFile(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'file'
def _parse(self):
if self._parsed:
return
super(DoxyFile, self)._parse()
self.retrieve_data()
self.set_descriptions(self._retrieved_data.compounddef)
if self._error:
return
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyFile)
class DoxyNamespace(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'namespace'
Base.mem_classes.append(DoxyNamespace)
class DoxyGroup(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'group'
def _parse(self):
if self._parsed:
return
super(DoxyGroup, self)._parse()
self.retrieve_data()
if self._error:
return
cdef = self._retrieved_data.compounddef
self._data['title'] = description(cdef.title)
# Process inner groups
grps = cdef.innergroup
for grp in grps:
converted = DoxyGroup.from_refid(grp.refid, top=self.top)
self._members.append(converted)
# Process inner classes
klasses = cdef.innerclass
for kls in klasses:
converted = DoxyClass.from_refid(kls.refid, top=self.top)
self._members.append(converted)
# Process normal members
self.process_memberdefs()
title = property(lambda self: self.data()['title'])
Base.mem_classes.append(DoxyGroup)
class DoxyFriend(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'friend'
Base.mem_classes.append(DoxyFriend)
class DoxyOther(Base):
__module__ = "gnuradio.utils.doxyxml"
kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page'])
@classmethod
def can_parse(cls, obj):
return obj.kind in cls.kinds
Base.mem_classes.append(DoxyOther)
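# Example usage (a sketch; the XML path is hypothetical and assumes doxygen has
# already generated its xml output, including index.xml, in that directory):
#   di = DoxyIndex('build/doxygen/xml')
#   for member in di.members():
#       if isinstance(member, DoxyClass):
#           print member.brief_description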
| gpl-2.0 |
adammenges/statsmodels | statsmodels/tsa/mlemodel.py | 36 | 2101 | """Base Classes for Likelihood Models in time series analysis
Warning: imports numdifftools
Created on Sun Oct 10 15:00:47 2010
Author: josef-pktd
License: BSD
"""
import numpy as np
try:
import numdifftools as ndt
except ImportError:
pass
from statsmodels.base.model import LikelihoodModel
#copied from sandbox/regression/mle.py
#TODO: I take it this is only a stub and should be included in another
# model class?
class TSMLEModel(LikelihoodModel):
"""
univariate time series model for estimation with maximum likelihood
Note: This is not working yet
"""
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(TSMLEModel, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def geterrors(self, params):
raise NotImplementedError
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
"""
raise NotImplementedError
def score(self, params):
"""
Score vector for Arma model
"""
#return None
#print params
jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
return jac(params)[-1]
def hessian(self, params):
"""
Hessian of arma model. Currently uses numdifftools
"""
#return None
Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
return Hfun(params)[-1]
def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):
'''estimate model by minimizing negative loglikelihood
does this need to be overwritten ?
'''
if start_params is None and hasattr(self, '_start_params'):
start_params = self._start_params
#start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))
mlefit = super(TSMLEModel, self).fit(start_params=start_params,
maxiter=maxiter, method=method, tol=tol)
return mlefit
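# Minimal sketch of a concrete subclass (illustrative only -- the AR(1)
# conditional log-likelihood below is an assumption, not part of this module):
#
# class AR1MLE(TSMLEModel):
#     def loglike(self, params):
#         rho, sigma2 = params
#         e = self.endog[1:] - rho * self.endog[:-1]   # one-step residuals
#         nobs = len(e)
#         return -0.5 * (nobs * np.log(2 * np.pi * sigma2)
#                        + (e ** 2).sum() / sigma2)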
| bsd-3-clause |
Mazecreator/tensorflow | tensorflow/contrib/layers/python/layers/optimizers.py | 12 | 18942 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": train.MomentumOptimizer,
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
OPTIMIZER_SUMMARIES = [
"learning_rate",
"loss",
"gradients",
"gradient_norm",
"global_gradient_norm",
]
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None,
summaries=None,
colocate_gradients_with_ops=False,
increment_global_step=True):
"""Given loss and parameters for optimizer, returns a training op.
Various ways of passing optimizers include:
- by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES
for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
- by function taking learning rate `Tensor` as argument and returning an
`Optimizer` instance. E.g. `optimize_loss(...,
optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
Alternatively, if `learning_rate` is `None`, the function takes no
arguments. E.g. `optimize_loss(..., learning_rate=None,
optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
- by a subclass of `Optimizer` having a single-argument constructor
(the argument is the learning rate), such as AdamOptimizer or
AdagradOptimizer. E.g. `optimize_loss(...,
optimizer=tf.train.AdagradOptimizer)`.
- by an instance of a subclass of `Optimizer`.
E.g., `optimize_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.
Args:
loss: Scalar `Tensor`.
global_step: Scalar int `Tensor`, step counter to update on each step
unless `increment_global_step` is `False`. If not supplied,
it will be fetched from the default graph (see
`tf.train.get_global_step` for details). If it has
not been created, no step will be incremented with each weight
update. `learning_rate_decay_fn` requires `global_step`.
learning_rate: float or `Tensor`, magnitude of update per each training
step. Can be `None`.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of `tf.Optimizer` that implements
`compute_gradients` and `apply_gradients` functions.
optimizer instance should be instantiation of `tf.Optimizer`
sub-class and have `compute_gradients` and `apply_gradients`
functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats.
If present, gradients for specified
variables will be multiplied by given constant.
    clip_gradients: float, callable or `None`. If a float is provided, a global
      clipping is applied to prevent the norm of the gradient from exceeding
      this value. Alternatively, a callable can be provided e.g.: adaptive_clipping.
This callable takes a `list` of `(gradients, variables)` `tuple`s and
returns the same thing with the gradients modified.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: `tf.train.exponential_decay`.
Ignored if `learning_rate` is not supplied.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection. The order of execution
between `update_ops` and `loss` is non-deterministic.
variables: list of variables to optimize or
`None` to use all trainable variables.
name: The name for this operation is used to scope operations and summaries.
summaries: List of internal quantities to visualize on tensorboard. If not
set, the loss, the learning rate, and the global norm of the
gradients will be reported. The complete list of possible values
is in OPTIMIZER_SUMMARIES.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
increment_global_step: Whether to increment `global_step`. If your model
calls `optimize_loss` multiple times per training step (e.g. to optimize
different parts of the model), use this arg to avoid incrementing
`global_step` more times than necessary.
Returns:
Training op.
Raises:
ValueError: if:
* `loss` is an invalid type or shape.
* `global_step` is an invalid type or shape.
* `learning_rate` is an invalid type or value.
* `optimizer` has the wrong type.
* `clip_gradients` is neither float nor callable.
* `learning_rate` and `learning_rate_decay_fn` are supplied, but no
`global_step` is available.
* `gradients` is empty.
"""
loss = ops.convert_to_tensor(loss)
contrib_framework.assert_scalar(loss)
if global_step is None:
global_step = train.get_global_step()
else:
train.assert_global_step(global_step)
with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
# Update ops take UPDATE_OPS collection if not provided.
if update_ops is None:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
# Make sure update ops are ran before computing loss.
if update_ops:
loss = control_flow_ops.with_dependencies(list(update_ops), loss)
# Learning rate variable, with possible decay.
lr = None
if learning_rate is not None:
if (isinstance(learning_rate, ops.Tensor) and
learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
if learning_rate < 0.0:
          raise ValueError("Invalid learning_rate %s." % learning_rate)
lr = vs.get_variable(
"learning_rate", [],
trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" % (str(learning_rate),
str(type(learning_rate))))
if summaries is None:
summaries = ["loss", "learning_rate", "global_gradient_norm"]
else:
for summ in summaries:
if summ not in OPTIMIZER_SUMMARIES:
raise ValueError("Summaries should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_SUMMARIES), summ))
if learning_rate is not None and learning_rate_decay_fn is not None:
if global_step is None:
raise ValueError("global_step is required for learning_rate_decay_fn.")
lr = learning_rate_decay_fn(lr, global_step)
if "learning_rate" in summaries:
summary.scalar("learning_rate", lr)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is string (%s)." % optimizer)
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif (isinstance(optimizer, type) and
issubclass(optimizer, optimizer_.Optimizer)):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is class (%s)." % optimizer)
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
elif callable(optimizer):
if learning_rate is not None:
opt = optimizer(lr)
else:
opt = optimizer()
if not isinstance(opt, optimizer_.Optimizer):
raise ValueError("Unrecognized optimizer: function should return "
"subclass of Optimizer. Got %s." % str(opt))
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer, instance of "
"subclass of Optimizer or function with one argument. "
"Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss,
variables,
colocate_gradients_with_ops=colocate_gradients_with_ops)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(gradients,
gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
if not gradients:
raise ValueError(
"Empty list of (gradient, var) pairs encountered. This is most "
"likely to be caused by an improper value of gradient_multipliers.")
if "global_gradient_norm" in summaries or "gradient_norm" in summaries:
summary.scalar("global_norm/gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Optionally clip gradients by global norm.
if isinstance(clip_gradients, float):
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError(
"Unknown type %s for clip_gradients" % type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
var_name = variable.name.replace(":", "_")
if "gradients" in summaries:
summary.histogram("gradients/%s" % var_name, grad_values)
if "gradient_norm" in summaries:
summary.scalar("gradient_norm/%s" % var_name,
clip_ops.global_norm([grad_values]))
if clip_gradients is not None and ("global_gradient_norm" in summaries or
"gradient_norm" in summaries):
summary.scalar("global_norm/clipped_gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Create gradient updates.
grad_updates = opt.apply_gradients(
gradients,
global_step=global_step if increment_global_step else None,
name="train")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
return train_tensor
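# Example usage (a sketch; `loss` and the parameter values are placeholders):
#   train_op = optimize_loss(
#       loss,
#       global_step=tf.train.get_or_create_global_step(),
#       learning_rate=0.01,
#       optimizer='SGD',
#       clip_gradients=5.0)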
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
return list(zip(clipped_gradients, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
"""Find max_norm given norm and previous average."""
with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
log_norm = math_ops.log(norm + epsilon)
def moving_average(name, value, decay):
moving_average_variable = vs.get_variable(
name,
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.to_float(global_step)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
decay=0.95,
static_max_norm=None,
global_step=None,
report_summary=False,
epsilon=1e-8,
name=None):
"""Adapt the clipping value using statistics on the norms.
Implement adaptive gradient as presented in section 3.2.1 of
https://arxiv.org/abs/1412.1602.
Keeps a moving average of the mean and std of the log(norm) of the gradient.
If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be
rescaled such that the global norm becomes `exp(mean)`.
Args:
    std_factor: Python scalar (or tensor).
`max_norm = exp(mean + std_factor*std)`
decay: The smoothing factor of the moving averages.
static_max_norm: If provided, will threshold the norm to this value as an
extra safety.
global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
This provides a quicker adaptation of the mean for the first steps.
report_summary: If `True`, will add histogram summaries of the `max_norm`.
epsilon: Small value chosen to avoid zero variance.
name: The name for this operation is used to scope operations and summaries.
Returns:
A function for applying gradient clipping.
"""
def gradient_clipping(grads_and_vars):
"""Internal function for adaptive clipping."""
grads, variables = zip(*grads_and_vars)
norm = clip_ops.global_norm(grads)
max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
global_step, epsilon, name)
# reports the max gradient norm for debugging
if report_summary:
summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = array_ops.where(norm < max_norm,
array_ops.ones_like(norm),
math_ops.exp(log_mean) / norm)
if static_max_norm is not None:
factor = math_ops.minimum(static_max_norm / norm, factor)
# apply factor
clipped_grads = []
for grad in grads:
if grad is None:
clipped_grads.append(None)
elif isinstance(grad, ops.IndexedSlices):
clipped_grads.append(
ops.IndexedSlices(grad.values * factor, grad.indices,
grad.dense_shape))
else:
clipped_grads.append(grad * factor)
return list(zip(clipped_grads, variables))
return gradient_clipping
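# Example: pass the returned callable as `clip_gradients` to optimize_loss
# (a sketch; the parameter values are illustrative):
#   train_op = optimize_loss(
#       loss, global_step, learning_rate=0.1, optimizer='Adam',
#       clip_gradients=adaptive_clipping_fn(std_factor=2., decay=0.95,
#                                           global_step=global_step))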
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if gradient is None:
noisy_gradients.append(None)
continue
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if (grad is not None and
(var in gradient_multipliers or var.name in gradient_multipliers)):
key = var if var in gradient_multipliers else var.name
multiplier = constant_op.constant(
gradient_multipliers[key], dtype=dtypes.float32)
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values * multiplier
grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= multiplier
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
| apache-2.0 |
samdoran/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py | 69 | 20312 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
module: ec2_vpc_vgw
short_description: Create and delete AWS VPN Virtual Gateways.
description:
- Creates AWS VPN Virtual Gateways
- Deletes AWS VPN Virtual Gateways
- Attaches Virtual Gateways to VPCs
- Detaches Virtual Gateways from VPCs
version_added: "2.2"
requirements: [ boto3 ]
options:
state:
description:
- present to ensure resource is created.
- absent to remove resource
required: false
default: present
choices: [ "present", "absent"]
name:
description:
- name of the vgw to be created or deleted
required: false
type:
description:
- type of the virtual gateway to be created
required: false
choices: [ "ipsec.1" ]
vpn_gateway_id:
description:
- vpn gateway id of an existing virtual gateway
required: false
vpc_id:
description:
- the vpc-id of a vpc to attach or detach
required: false
wait_timeout:
description:
- number of seconds to wait for status during vpc attach and detach
required: false
default: 320
tags:
description:
- dictionary of resource tags
required: false
default: null
aliases: [ "resource_tags" ]
author: Nick Aslanidis (@naslanidis)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create a new vgw attached to a specific VPC
ec2_vpc_vgw:
state: present
region: ap-southeast-2
profile: personal
vpc_id: vpc-12345678
name: personal-testing
type: ipsec.1
register: created_vgw
- name: Create a new unattached vgw
ec2_vpc_vgw:
state: present
region: ap-southeast-2
profile: personal
name: personal-testing
type: ipsec.1
tags:
environment: production
owner: ABC
register: created_vgw
- name: Remove a new vgw using the name
ec2_vpc_vgw:
state: absent
region: ap-southeast-2
profile: personal
name: personal-testing
type: ipsec.1
register: deleted_vgw
- name: Remove a new vgw using the vpn_gateway_id
ec2_vpc_vgw:
state: absent
region: ap-southeast-2
profile: personal
vpn_gateway_id: vgw-3a9aa123
register: deleted_vgw
'''
RETURN = '''
result:
description: The result of the create, or delete action.
returned: success
type: dictionary
'''
try:
import json
import time
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def get_vgw_info(vgws):
if not isinstance(vgws, list):
return
for vgw in vgws:
vgw_info = {
'id': vgw['VpnGatewayId'],
'type': vgw['Type'],
'state': vgw['State'],
'vpc_id': None,
'tags': dict()
}
for tag in vgw['Tags']:
vgw_info['tags'][tag['Key']] = tag['Value']
if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
return vgw_info
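# Example return value (illustrative only; the ids and tag values are made up):
#   {'id': 'vgw-0123abcd', 'type': 'ipsec.1', 'state': 'available',
#    'vpc_id': 'vpc-12345678', 'tags': {'Name': 'personal-testing'}}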
def wait_for_status(client, module, vpn_gateway_id, status):
polling_increment_secs = 15
max_retries = (module.params.get('wait_timeout') / polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
response = find_vgw(client, module, vpn_gateway_id)
if response[0]['VpcAttachments'][0]['State'] == status:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return status_achieved, result
def attach_vgw(client, module, vpn_gateway_id):
params = dict()
params['VpcId'] = module.params.get('vpc_id')
try:
response = client.attach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
if not status_achieved:
module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')
result = response
return result
def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
params = dict()
params['VpcId'] = module.params.get('vpc_id')
if vpc_id:
try:
response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
if not status_achieved:
module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')
result = response
return result
def create_vgw(client, module):
params = dict()
params['Type'] = module.params.get('type')
try:
response = client.create_vpn_gateway(Type=params['Type'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def delete_vgw(client, module, vpn_gateway_id):
try:
response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
#return the deleted VpnGatewayId as this is not included in the above response
result = vpn_gateway_id
return result
def create_tags(client, module, vpn_gateway_id):
params = dict()
try:
response = client.create_tags(Resources=[vpn_gateway_id],Tags=load_tags(module))
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None):
params = dict()
if tags_to_delete:
try:
response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.delete_tags(Resources=[vpn_gateway_id])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def load_tags(module):
tags = []
if module.params.get('tags'):
for name, value in module.params.get('tags').items():
tags.append({'Key': name, 'Value': str(value)})
tags.append({'Key': "Name", 'Value': module.params.get('name')})
else:
tags.append({'Key': "Name", 'Value': module.params.get('name')})
return tags
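# Example (illustrative): with name='personal-testing' and tags={'owner': 'ABC'},
# load_tags returns:
#   [{'Key': 'owner', 'Value': 'ABC'}, {'Key': 'Name', 'Value': 'personal-testing'}]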
def find_tags(client, module, resource_id=None):
if resource_id:
try:
response = client.describe_tags(Filters=[
{'Name': 'resource-id', 'Values': [resource_id]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def check_tags(client, module, existing_vgw, vpn_gateway_id):
params = dict()
params['Tags'] = module.params.get('tags')
vgw = existing_vgw
changed = False
tags_list = {}
#format tags for comparison
for tags in existing_vgw[0]['Tags']:
if tags['Key'] != 'Name':
tags_list[tags['Key']] = tags['Value']
# if existing tags don't match the tags arg, delete existing and recreate with new list
if params['Tags'] is not None and tags_list != params['Tags']:
delete_tags(client, module, vpn_gateway_id)
create_tags(client, module, vpn_gateway_id)
vgw = find_vgw(client, module)
changed = True
#if no tag args are supplied, delete any existing tags with the exception of the name tag
if params['Tags'] is None and tags_list != {}:
tags_to_delete = []
for tags in existing_vgw[0]['Tags']:
if tags['Key'] != 'Name':
tags_to_delete.append(tags)
delete_tags(client, module, vpn_gateway_id, tags_to_delete)
vgw = find_vgw(client, module)
changed = True
return vgw, changed
def find_vpc(client, module):
params = dict()
params['vpc_id'] = module.params.get('vpc_id')
if params['vpc_id']:
try:
response = client.describe_vpcs(VpcIds=[params['vpc_id']])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def find_vgw(client, module, vpn_gateway_id=None):
params = dict()
params['Name'] = module.params.get('name')
params['Type'] = module.params.get('type')
params['State'] = module.params.get('state')
if params['State'] == 'present':
try:
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
if vpn_gateway_id:
try:
response = client.describe_vpn_gateways(VpnGatewayIds=vpn_gateway_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response['VpnGateways']
return result
def ensure_vgw_present(client, module):
# If an existing vgw name and type matches our args, then a match is considered to have been
# found and we will not create another vgw.
changed = False
params = dict()
result = dict()
params['Name'] = module.params.get('name')
params['VpcId'] = module.params.get('vpc_id')
params['Type'] = module.params.get('type')
params['Tags'] = module.params.get('tags')
params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
# Check that a name argument has been supplied.
if not module.params.get('name'):
        module.fail_json(msg='A name is required when a status of \'present\' is supplied')
# check if a gateway matching our module args already exists
existing_vgw = find_vgw(client, module)
if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)
# if a vpc_id was provided, check if it exists and if it's attached
if params['VpcId']:
# check that the vpc_id exists. If not, an exception is thrown
vpc = find_vpc(client, module)
current_vpc_attachments = existing_vgw[0]['VpcAttachments']
if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
if current_vpc_attachments[0]['VpcId'] == params['VpcId'] and current_vpc_attachments[0]['State'] == 'attached':
changed = False
else:
# detach the existing vpc from the virtual gateway
vpc_to_detach = current_vpc_attachments[0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
time.sleep(5)
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
vgw = find_vgw(client, module, [vpn_gateway_id])
changed = True
else:
# attach the vgw to the supplied vpc
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
vgw = find_vgw(client, module, [vpn_gateway_id])
changed = True
# if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it.
else:
existing_vgw = find_vgw(client, module, [vpn_gateway_id])
if existing_vgw[0]['VpcAttachments'] != []:
if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
# detach the vpc from the vgw
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
vgw = find_vgw(client, module, [vpn_gateway_id])
else:
# create a new vgw
new_vgw = create_vgw(client, module)
changed = True
vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
# tag the new virtual gateway
create_tags(client, module, vpn_gateway_id)
# return current state of the vgw
vgw = find_vgw(client, module, [vpn_gateway_id])
# if a vpc-id was supplied, attempt to attach it to the vgw
if params['VpcId']:
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
changed = True
vgw = find_vgw(client, module, [vpn_gateway_id])
result = get_vgw_info(vgw)
return changed, result
def ensure_vgw_absent(client, module):
# If an existing vgw name and type matches our args, then a match is considered to have been
# found and we will take steps to delete it.
changed = False
params = dict()
result = dict()
params['Name'] = module.params.get('name')
params['VpcId'] = module.params.get('vpc_id')
params['Type'] = module.params.get('type')
params['Tags'] = module.params.get('tags')
params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
# check if a gateway matching our module args already exists
if params['VpnGatewayIds']:
existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
existing_vgw = existing_vgw_with_id
if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
if params['VpcId']:
if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
else:
# detach the vpc from the vgw
detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
# attempt to detach any attached vpcs
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
# no vpc's are attached so attempt to delete the vgw
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
changed = False
deleted_vgw = "Nothing to do"
else:
#Check that a name and type argument has been supplied if no vgw-id
if not module.params.get('name') or not module.params.get('type'):
            module.fail_json(msg='A name and type are required when no vgw-id is supplied and the state is \'absent\'')
existing_vgw = find_vgw(client, module)
if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
if params['VpcId']:
if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
else:
# detach the vpc from the vgw
detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
#now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
changed = True
else:
# attempt to detach any attached vpcs
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
#now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
else:
# no vpc's are attached so attempt to delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
changed = True
else:
changed = False
deleted_vgw = None
result = deleted_vgw
return changed, result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent']),
region=dict(required=True),
name=dict(),
vpn_gateway_id=dict(),
vpc_id=dict(),
wait_timeout=dict(type='int', default=320),
type=dict(default='ipsec.1', choices=['ipsec.1']),
tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
        module.fail_json(msg='json and boto3 are required.')
state = module.params.get('state').lower()
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError:
e = get_exception()
module.fail_json(msg="Can't authorize connection - "+str(e))
if state == 'present':
(changed, results) = ensure_vgw_present(client, module)
else:
(changed, results) = ensure_vgw_absent(client, module)
module.exit_json(changed=changed, vgw=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/ctypes/test/test_byteswap.py | 59 | 10893 | import sys, unittest, struct, math, ctypes
from binascii import hexlify
from ctypes import *
def bin(s):
return hexlify(memoryview(s)).upper()
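# Quick sanity check of the helper above (values shown for illustration):
#   bin(struct.pack("<h", 0x1234)) == "3412"   # little-endian byte order
#   bin(struct.pack(">h", 0x1234)) == "1234"   # big-endian byte order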
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
class Test(unittest.TestCase):
def X_test(self):
print >> sys.stderr, sys.byteorder
for i in range(32):
bits = BITS()
setattr(bits, "i%s" % i, 1)
dump(bits)
def test_endian_short(self):
if sys.byteorder == "little":
self.assertTrue(c_short.__ctype_le__ is c_short)
self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short)
else:
self.assertTrue(c_short.__ctype_be__ is c_short)
self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short)
s = c_short.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_short.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
self.assertEqual(s.value, 0x1234)
s = c_ushort.__ctype_le__(0x1234)
self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.assertEqual(bin(s), "3412")
self.assertEqual(s.value, 0x1234)
def test_endian_int(self):
if sys.byteorder == "little":
self.assertTrue(c_int.__ctype_le__ is c_int)
self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int)
else:
self.assertTrue(c_int.__ctype_be__ is c_int)
self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int)
s = c_int.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_int.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
self.assertEqual(bin(s), "12345678")
self.assertEqual(s.value, 0x12345678)
s = c_uint.__ctype_le__(0x12345678)
self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
self.assertEqual(bin(s), "78563412")
self.assertEqual(s.value, 0x12345678)
def test_endian_longlong(self):
if sys.byteorder == "little":
self.assertTrue(c_longlong.__ctype_le__ is c_longlong)
self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong)
else:
self.assertTrue(c_longlong.__ctype_be__ is c_longlong)
self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.assertEqual(bin(s), "1234567890ABCDEF")
self.assertEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.assertEqual(bin(s), "EFCDAB9078563412")
self.assertEqual(s.value, 0x1234567890ABCDEF)
def test_endian_float(self):
if sys.byteorder == "little":
self.assertTrue(c_float.__ctype_le__ is c_float)
self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float)
else:
self.assertTrue(c_float.__ctype_be__ is c_float)
self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.assertAlmostEqual(s.value, math.pi, 6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.assertTrue(c_double.__ctype_le__ is c_double)
self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double)
else:
self.assertTrue(c_double.__ctype_be__ is c_double)
self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
self.assertTrue(c_byte.__ctype_le__ is c_byte)
self.assertTrue(c_byte.__ctype_be__ is c_byte)
self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte)
self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte)
self.assertTrue(c_char.__ctype_le__ is c_char)
self.assertTrue(c_char.__ctype_be__ is c_char)
def test_struct_fields_1(self):
if sys.byteorder == "little":
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(base):
pass
_fields_ = [("a", c_ubyte),
("b", c_byte),
("c", c_short),
("d", c_ushort),
("e", c_int),
("f", c_uint),
("g", c_long),
("h", c_ulong),
("i", c_longlong),
("k", c_ulonglong),
("l", c_float),
("m", c_double),
("n", c_char),
("b1", c_byte, 3),
("b2", c_byte, 3),
("b3", c_byte, 2),
("a", c_int * 3 * 3 * 3)]
T._fields_ = _fields_
# these fields do not support different byte order:
for typ in c_wchar, c_void_p, POINTER(c_int):
_fields_.append(("x", typ))
class T(base):
pass
self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)])
def test_struct_struct(self):
# nested structures with different byteorders
# create nested structures with given byteorders and set memory to data
for nested, data in (
(BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
(LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
):
for parent in (
BigEndianStructure,
LittleEndianStructure,
Structure,
):
class NestedStructure(nested):
_fields_ = [("x", c_uint32),
("y", c_uint32)]
class TestStructure(parent):
_fields_ = [("point", NestedStructure)]
self.assertEqual(len(data), sizeof(TestStructure))
ptr = POINTER(TestStructure)
s = cast(data, ptr)[0]
del ctypes._pointer_type_cache[TestStructure]
self.assertEqual(s.point.x, 1)
self.assertEqual(s.point.y, 2)
def test_struct_fields_2(self):
# standard packing in struct uses no alignment.
# So, we have to align using pad bytes.
#
# Unaligned accesses will crash Python (on those platforms that
# don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">bxhid"
else:
base = LittleEndianStructure
fmt = "<bxhid"
class S(base):
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
("d", c_double)]
s1 = S(0x12, 0x1234, 0x12345678, 3.14)
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_nonnative_struct_fields(self):
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">b h xi xd"
else:
base = LittleEndianStructure
fmt = "<b h xi xd"
class S(base):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_native_struct_fields(self):
if sys.byteorder == "little":
fmt = "<b h xi xd"
else:
fmt = ">b h xi xd"
class S(Structure):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
vsajip/django | django/db/models/__init__.py | 115 | 1456 | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.db import connection
from django.db.models.loading import get_apps, get_app, get_models, get_model, register_models
from django.db.models.query import Q
from django.db.models.expressions import F
from django.db.models.manager import Manager
from django.db.models.base import Model
from django.db.models.aggregates import *
from django.db.models.fields import *
from django.db.models.fields.subclassing import SubfieldBase
from django.db.models.fields.files import FileField, ImageField
from django.db.models.fields.related import ForeignKey, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel
from django.db.models.deletion import CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING, ProtectedError
from django.db.models import signals
from django.utils.decorators import wraps
def permalink(func):
"""
Decorator that calls urlresolvers.reverse() to return a URL using
parameters returned by the decorated function "func".
"func" should be a function that returns a tuple in one of the
following formats:
(viewname, viewargs)
(viewname, viewargs, viewkwargs)
"""
from django.core.urlresolvers import reverse
@wraps(func)
def inner(*args, **kwargs):
bits = func(*args, **kwargs)
return reverse(bits[0], None, *bits[1:3])
return inner
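# Example usage on a model (a sketch; the view name and args are hypothetical):
#   class Person(Model):
#       @permalink
#       def get_absolute_url(self):
#           return ('people.views.details', [str(self.id)])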
| bsd-3-clause |
refeed/coala-bears | bears/java/CheckstyleBear.py | 3 | 4182 | from coalib.bearlib.abstractions.Linter import linter
from coalib.settings.Setting import path
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
_online_styles = {
'android-check-easy': 'https://raw.githubusercontent.com/noveogroup/' +
'android-check/master/android-check-plugin/src/main/resources/' +
'checkstyle/checkstyle-easy.xml',
'android-check-hard': 'https://raw.githubusercontent.com/noveogroup/' +
'android-check/master/android-check-plugin/src/main/resources/' +
'checkstyle/checkstyle-hard.xml',
}
_checkstyle_version = '6.19'
# To be deprecated
known_checkstyles = dict(_online_styles, **{'google': None, 'sun': None})
def check_invalid_configuration(checkstyle_configs, use_spaces, indent_size):
    if (checkstyle_configs == 'google' and
(not use_spaces or indent_size != 2)):
raise ValueError('Google checkstyle config does not support '
'use_spaces=False or indent_size != 2')
def known_checkstyle_or_path(setting):
if str(setting) in known_checkstyles.keys():
return str(setting)
else:
return path(setting)
@linter(executable='java',
normalize_column_numbers=True,
remove_zero_numbers=True,
output_format='regex',
output_regex=r'\[(?P<severity>ERROR|WARN|INFO)\].*?'
r'(?P<line>\d+):?(?P<column>\d+)?. '
r'(?P<message>.*?) *\[(?P<origin>[a-zA-Z]+?)\]')
class CheckstyleBear:
"""
Check Java code for possible style, semantic and design issues.
For more information, consult
<http://checkstyle.sourceforge.net/checks.html>.
"""
LANGUAGES = {'Java'}
REQUIREMENTS = {DistributionRequirement(apt_get='default-jre')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting', 'Smell'}
def setup_dependencies(self):
version = _checkstyle_version
type(self).checkstyle_jar_file = self.download_cached_file(
'https://github.com/checkstyle/checkstyle/releases/download/'
'checkstyle-%s/checkstyle-%s-all.jar' % (version, version),
'checkstyle-6.19.jar')
def create_arguments(
self, filename, file, config_file,
checkstyle_configs: known_checkstyle_or_path = 'google',
use_spaces: bool = True,
indent_size: int = 2,
):
"""
:param checkstyle_configs:
A file containing configs to use in ``checkstyle``. It can also
have the special values:
- google - Google's Java style. More info at
<http://checkstyle.sourceforge.net/style_configs.html>.
- sun - Sun's Java style. These are the same
as the default Eclipse checks. More info at
<http://checkstyle.sourceforge.net/style_configs.html>.
- android-check-easy - The easy Android configs provided by the
android-check eclipse plugin. More info at
<https://github.com/noveogroup/android-check>.
            - android-check-hard - The hard Android configs provided by the
android-check eclipse plugin. More info at
<https://github.com/noveogroup/android-check>.
- geosoft - The Java style followed by GeoSoft. More info at
<http://geosoft.no/development/javastyle.html>
"""
check_invalid_configuration(
checkstyle_configs, use_spaces, indent_size)
if checkstyle_configs in ('google', 'sun'):
# Checkstyle included these two rulesets since version 6.2
# Locate the file as an absolute resource into the checkstyle jar
checkstyle_configs = '/%s_checks.xml' % checkstyle_configs
elif checkstyle_configs in _online_styles:
checkstyle_configs = self.download_cached_file(
_online_styles[checkstyle_configs],
checkstyle_configs + '.xml')
return ('-jar', self.checkstyle_jar_file, '-c',
checkstyle_configs, filename)
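    # Example section from a .coafile using this bear (a sketch; the section
    # name and values are illustrative):
    #   [all.java]
    #   bears = CheckstyleBear
    #   files = **/*.java
    #   checkstyle_configs = google
    #   use_spaces = True
    #   indent_size = 2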
| agpl-3.0 |
YarivCol/mbed-os | tools/compliance/ioper_base.py | 106 | 1992 | """
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
import sys
try:
from colorama import Fore
except:
pass
COLORAMA = 'colorama' in sys.modules
class IOperTestCaseBase():
""" Interoperability test case base class
@return list of tuple (severity, Description)
        Example: result.append((IOperTestSeverity.INFO, ""))
"""
def __init__(self, scope=None):
self.PASS = 'PASS'
self.INFO = 'INFO'
self.ERROR = 'ERROR'
self.WARN = 'WARN'
self.scope = scope # Default test scope (basic, pedantic, mbed-enabled etc...)
def test(self, param=None):
result = []
return result
def RED(self, text):
return self.color_text(text, color=Fore.RED, delim=Fore.RESET) if COLORAMA else text
def GREEN(self, text):
return self.color_text(text, color=Fore.GREEN, delim=Fore.RESET) if COLORAMA else text
def YELLOW(self, text):
return self.color_text(text, color=Fore.YELLOW, delim=Fore.RESET) if COLORAMA else text
def color_text(self, text, color='', delim=''):
return color + text + color + delim
def COLOR(self, severity, text):
colors = {
self.PASS : self.GREEN,
self.ERROR : self.RED,
self.WARN : self.YELLOW
}
if severity in colors:
return colors[severity](text)
return text
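    # Example (illustrative):
    #   tc = IOperTestCaseBase(scope='basic')
    #   print(tc.COLOR(tc.PASS, 'all good'))  # green when colorama is available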
| apache-2.0 |
tsdmgz/ansible | lib/ansible/modules/network/nxos/nxos_igmp.py | 23 | 5057 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_igmp
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages IGMP global configuration.
description:
- Manages IGMP global configuration settings.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- When C(state=default), all supported params will be reset to a
default state.
- If restart is set to true with other params set, the restart will happen
last, i.e. after the configuration takes place.
options:
flush_routes:
description:
- Removes routes when the IGMP process is restarted. By default,
routes are not flushed.
required: false
default: null
choices: ['true', 'false']
enforce_rtr_alert:
description:
- Enables or disables the enforce router alert option check for
IGMPv2 and IGMPv3 packets.
required: false
default: null
choices: ['true', 'false']
restart:
description:
- Restarts the igmp process (using an exec config command).
required: false
default: null
choices: ['true', 'false']
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present', 'default']
'''
EXAMPLES = '''
- name: Default igmp global params (all params except restart)
nxos_igmp:
state: default
host: "{{ inventory_hostname }}"
- name: Ensure the following igmp global config exists on the device
nxos_igmp:
flush_routes: true
enforce_rtr_alert: true
host: "{{ inventory_hostname }}"
- name: Restart the igmp process
nxos_igmp:
restart: true
host: "{{ inventory_hostname }}"
'''
RETURN = '''
updates:
description: commands sent to the device
returned: always
type: list
sample: ["ip igmp flush-routes"]
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def get_current(module):
output = run_commands(module, {'command': 'show running-config', 'output': 'text'})
return {
'flush_routes': 'ip igmp flush-routes' in output[0],
'enforce_rtr_alert': 'ip igmp enforce-router-alert' in output[0]
}
def get_desired(module):
return {
'flush_routes': module.params['flush_routes'],
'enforce_rtr_alert': module.params['enforce_rtr_alert']
}
def main():
argument_spec = dict(
flush_routes=dict(type='bool'),
enforce_rtr_alert=dict(type='bool'),
restart=dict(type='bool', default=False),
state=dict(choices=['present', 'default'], default='present'),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
current = get_current(module)
desired = get_desired(module)
state = module.params['state']
restart = module.params['restart']
commands = list()
if state == 'default':
if current['flush_routes']:
commands.append('no ip igmp flush-routes')
if current['enforce_rtr_alert']:
commands.append('no ip igmp enforce-router-alert')
elif state == 'present':
if desired['flush_routes'] and not current['flush_routes']:
commands.append('ip igmp flush-routes')
if desired['enforce_rtr_alert'] and not current['enforce_rtr_alert']:
commands.append('ip igmp enforce-router-alert')
result = {'changed': False, 'updates': commands, 'warnings': warnings}
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
if module.params['restart']:
run_commands(module, 'restart igmp')
module.exit_json(**result)
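# Editor's sketch (assumption): the command-building logic in main() isolated
# as a pure function, which makes the idempotency behaviour easy to unit-test
# without a device connection.
def _build_commands(state, current, desired):
    commands = []
    if state == 'default':
        # Reset only the settings that are currently enabled.
        if current['flush_routes']:
            commands.append('no ip igmp flush-routes')
        if current['enforce_rtr_alert']:
            commands.append('no ip igmp enforce-router-alert')
    elif state == 'present':
        # Emit a command only when desired differs from current.
        if desired['flush_routes'] and not current['flush_routes']:
            commands.append('ip igmp flush-routes')
        if desired['enforce_rtr_alert'] and not current['enforce_rtr_alert']:
            commands.append('ip igmp enforce-router-alert')
    return commands
# _build_commands('present',
#                 {'flush_routes': False, 'enforce_rtr_alert': True},
#                 {'flush_routes': True, 'enforce_rtr_alert': True})
# -> ['ip igmp flush-routes']   (already-satisfied settings emit no commands)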
if __name__ == '__main__':
main()
| gpl-3.0 |
bsmithyman/pymatsolver | pymatsolver/Tests/test_Triangle.py | 1 | 1715 | import unittest
import numpy as np, scipy.sparse as sp
TOL = 1e-12
class TestTriangle(unittest.TestCase):
def setUp(self):
n = 50
nrhs = 20
self.A = sp.rand(n, n, 0.4) + sp.identity(n)
self.sol = np.ones((n, nrhs))
self.rhsU = sp.triu(self.A) * self.sol
self.rhsL = sp.tril(self.A) * self.sol
def test_directLower(self):
from pymatsolver import ForwardSolver
ALinv = ForwardSolver(sp.tril(self.A))
X = ALinv * self.rhsL
x = ALinv * self.rhsL[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directUpper(self):
from pymatsolver import BackwardSolver
AUinv = BackwardSolver(sp.triu(self.A))
X = AUinv * self.rhsU
x = AUinv * self.rhsU[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directLower_python(self):
from pymatsolver import _ForwardSolver
ALinv = _ForwardSolver(sp.tril(self.A))
X = ALinv * self.rhsL
x = ALinv * self.rhsL[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directUpper_python(self):
from pymatsolver import _BackwardSolver
AUinv = _BackwardSolver(sp.triu(self.A))
X = AUinv * self.rhsU
x = AUinv * self.rhsU[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
if __name__ == '__main__':
unittest.main()
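# Editor's note (sketch): the same assertions can be cross-checked against
# SciPy's reference triangular solver (assumes SciPy >= 0.19 for
# spsolve_triangular):
#
#     from scipy.sparse.linalg import spsolve_triangular
#     x_ref = spsolve_triangular(sp.tril(A).tocsr(), b, lower=True)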
| mit |
mgree/sigplan | www-copy/web/playground/tfidf_testing.py | 4 | 4127 | import nltk
import string
import os
import sys
import codecs
import math
import re
import operator
import unicodedata
from nltk.corpus import stopwords
stemmer = nltk.stem.wordnet.WordNetLemmatizer()
stem = stemmer.lemmatize
def encode_ascii(word):
return word.encode('ascii', 'ignore')
words = ["ponies", "cats", "cat", "caresses"]
words = map(stem, words)
words = map(encode_ascii, words)
print words
f = open ("sort_trial", 'w')
final_whitelist = set(['a', 'aa', 'aaa', 'a', 'b', '%%', '%32*', '*'])
final_whitelist = list(final_whitelist)
final_whitelist.sort(key = len)
for whitelist_term in final_whitelist:
f.write(whitelist_term + "\n")
path = '/Users/evelynding/Documents/College/Junior/IW/tmpl/raw/full/pldi_full'
token_dict = []
file_name = []
wordDocs = {}
wordCount = {}
numWords = 0
numDocs = 0
pattern = re.compile("([0-9])+-fulltext.txt")
dir = '/Users/evelynding/Documents/College/Junior/IW/tmpl/www/web/playground/pldi_lemmatized'
if not os.path.exists(dir):
os.makedirs(dir)
regex = re.compile('[^ a-z]')
for subdir, dirs, files in os.walk(path):
for file in files:
if pattern.match(file):
numDocs += 1
file_path = subdir + os.path.sep + file
shakes = open(file_path, 'r')
text = shakes.read()
lowers = text.lower()
#no_punc = lowers.translate (None, string.punctuation)
#no_nums = no_punc.translate(None, '0123456789')
no_nums = regex.sub('', lowers)
words = no_nums.split()
words = map(stem, words)
words = map(encode_ascii, words)
tempDict = {}
for word in words:
if len(word) >= 3:
numWords += 1
if word in tempDict:
tempDict[word] += 1
else:
tempDict[word] = 1
for key, value in tempDict.iteritems():
if key in wordCount:
wordCount[key] += value
wordDocs[key] += 1
else:
wordCount[key] = value
wordDocs[key] = 1
print numWords
topTerms = {}
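# Score each term as tf * |log(df)|: tf = count / total words, and
# df = docs containing the term / total docs. log(df) is <= 0, so abs()
# turns it into the usual idf weight -log(df); terms that appear in every
# document score near zero and fall off the whitelist.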
for key in wordCount:
topTerms[key] = abs(wordCount[key]/float(numWords)*math.log(wordDocs[key]/float(numDocs)))
print numWords
print numDocs
whitelist = sorted(topTerms.items(), key=operator.itemgetter(1), reverse=True)[:6000]
whitelist = [i[0] for i in whitelist]
f = open ("whitelist", 'w')
whitelist = set(whitelist)
print len(whitelist)
blacklist = set(stopwords.words('english'))
curated_stopwords = set(['bdbc', 'bcbc', 'ddd', 'dddd', 'dda', 'dcd', 'dbd', 'cbd', 'bba', 'bdb', 'bdba', 'bdbe', 'beba', 'bfba', 'cbc', 'ccd', 'cdd',
'dbdd', 'dbddd', 'ddb', 'ddbd', 'dddb', 'dddda', 'ddddd', 'dddddb', 'dddddd', 'acd'])
secondary_whitelist = whitelist.difference(blacklist)
final_whitelist = secondary_whitelist.difference(curated_stopwords)
final_whitelist = list(final_whitelist)
final_whitelist.sort(key = len)
for whitelist_term in final_whitelist:
f.write(whitelist_term + "\n")
print len(final_whitelist)
#if (wordDocs[whitelist_term] < cutOff):
# f.write( '{:<20} {:<20} {:<10} {:<10}\n'.format (whitelist_term, topTerms[whitelist_term], wordCount[whitelist_term], wordDocs[whitelist_term]))
#f.write (whitelist_term + " ")
print "done with the whitelist..."
for subdir, dirs, files in os.walk(path):
for file in files:
if pattern.match(file):
file_path = subdir + os.path.sep + file
shakes = open(file_path)
text = shakes.read()
lowers = text.lower()
no_punc = lowers.translate (None, string.punctuation)
no_nums = no_punc.translate(None, '0123456789')
words = no_nums.split()
newWords = [x for x in words if x in final_whitelist]
newfile = dir + '/res' + subdir[-4:] + '-' + file
f = open(newfile, 'w')
for newWord in newWords:
f.write(newWord + " " )
# # if not os.path.exists('/popl'):
# # os.makedirs(dir)
| mit |
havard024/prego | crm/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
}
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
# flake8: noqa
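# Editor's note (sketch, not part of the original module): a prober consumes
# one of these models by mapping each input byte through 'charToOrderMap',
# then scoring consecutive order pairs against 'precedenceMatrix'; the
# charset whose byte pairs keep landing on high-frequency Russian sequences
# accumulates the highest confidence.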
| mit |
scieloorg/pulsemob_webservices | pulsemob_webservices/webservices/test_webservices.py | 2 | 21640 | __author__ = 'vitor'
from pprint import pprint
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
import json
import ConfigParser
from models import *
config = ConfigParser.ConfigParser()
config.read('webservices/test_webservices.cfg')
facebook_id = config.get("test_webservices", "facebook_id")
facebook_token = config.get("test_webservices", "facebook_token")
google_id = config.get("test_webservices", "google_id")
google_token = config.get("test_webservices", "google_token")
email = config.get("test_webservices", "email")
name = config.get("test_webservices", "name")
font_size = config.get("test_webservices", "font_size")
language = config.get("test_webservices", "language")
def insert_feed_publications():
# Inserting feeds.
feed_1 = Feed(None, "Human Sciences", "Ciencias Humanas", "Humanidades")
feed_2 = Feed(None, "Biological Sciences", "Ciencias Biologicas", "Ciencias Biologicas")
feed_3 = Feed(None, "Health Sciences", "Ciencias da Saude", "Ciencias de la Salud")
feed_1.save()
feed_2.save()
feed_3.save()
# Inserting publications
publication_1 = Publication(1, 'Old Testament Essays')
publication_2 = Publication(2, 'Samj: South African Medical Journal')
publication_3 = Publication(3, 'South African Journal Of Psychiatry')
publication_5 = Publication(5, 'Acta Theologica')
publication_6 = Publication(6, 'South African Journal Of Occupational Therapy')
publication_7 = Publication(7, 'Water Sa')
publication_8 = Publication(8, 'Escola Anna Nery')
publication_9 = Publication(9, 'Brazilian Journal Of Otorhinolaryngology')
publication_18 = Publication(18, 'Crop Breeding And Applied Biotechnology')
publication_22 = Publication(22, 'Trabalho, Educacao E Saude')
publication_23 = Publication(23, 'Brazilian Journal Of Medical And Biological Research')
publication_24 = Publication(24, 'Anales Del Instituto De Investigaciones Esteticas')
publication_1.save()
publication_2.save()
publication_3.save()
publication_5.save()
publication_6.save()
publication_7.save()
publication_8.save()
publication_9.save()
publication_18.save()
publication_22.save()
publication_23.save()
publication_24.save()
# Insert relations
publication_1.feeds.add(feed_1)
publication_2.feeds.add(feed_2)
publication_2.feeds.add(feed_3)
publication_3.feeds.add(feed_3)
publication_5.feeds.add(feed_1)
publication_6.feeds.add(feed_2)
publication_6.feeds.add(feed_3)
publication_7.feeds.add(feed_1)
publication_7.feeds.add(feed_2)
publication_8.feeds.add(feed_3)
publication_9.feeds.add(feed_3)
publication_18.feeds.add(feed_2)
publication_22.feeds.add(feed_1)
publication_23.feeds.add(feed_2)
publication_24.feeds.add(feed_1)
class Tests(APITestCase):
def test_case1(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "Login didn't work.")
response_dict = json.loads(response._container[0])
self.assertEqual(response_dict['user']['facebook_id'], facebook_id, "Facebook id doesn't match.")
self.assertEqual(response_dict['user']['email'], email, "Email doesn't match.")
self.assertEqual(response_dict['user']['name'], name, "Name doesn't match.")
self.assertEqual(response_dict['user']['font_size'], font_size, "Font size doesn't match.")
self.assertEqual(response_dict['user']['language'], language, "Language doesn't match.")
def test_case2(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "First login didn't work.")
response_dict1 = json.loads(response._container[0])
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "Second login didn't work.")
response_dict2 = json.loads(response._container[0])
self.assertEqual(response_dict2['user']['id'], response_dict1['user']['id'], "ID doesn't match.")
self.assertEqual(response_dict2['user']['facebook_id'], response_dict1['user']['facebook_id'], "Facebook id doesn't match.")
self.assertEqual(response_dict2['user']['email'], response_dict1['user']['email'], "Email doesn't match.")
self.assertEqual(response_dict2['user']['name'], response_dict1['user']['name'], "Name doesn't match.")
self.assertEqual(response_dict2['user']['font_size'], response_dict1['user']['font_size'], "Font size doesn't match.")
self.assertEqual(response_dict2['user']['language'], response_dict1['user']['language'], "Language doesn't match.")
def test_case3(self):
client = APIClient()
client.credentials(HTTP_GOOGLEID=google_id, HTTP_TOKEN=google_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "Login didn't work.")
response_dict = json.loads(response._container[0])
self.assertEqual(response_dict['user']['google_id'], google_id, "Google id doesn't match.")
self.assertEqual(response_dict['user']['email'], email, "Email doesn't match.")
self.assertEqual(response_dict['user']['name'], name, "Name doesn't match.")
self.assertEqual(response_dict['user']['font_size'], font_size, "Font size doesn't match.")
self.assertEqual(response_dict['user']['language'], language, "Language doesn't match.")
def test_case4(self):
client = APIClient()
client.credentials(HTTP_GOOGLEID=google_id, HTTP_TOKEN=google_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "First login didn't work.")
response_dict1 = json.loads(response._container[0])
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "Second login didn't work.")
response_dict2 = json.loads(response._container[0])
self.assertEqual(response_dict2['user']['id'], response_dict1['user']['id'], "ID doesn't match.")
self.assertEqual(response_dict2['user']['google_id'], response_dict1['user']['google_id'], "Google id doesn't match.")
self.assertEqual(response_dict2['user']['email'], response_dict1['user']['email'], "Email doesn't match.")
self.assertEqual(response_dict2['user']['name'], response_dict1['user']['name'], "Name doesn't match.")
self.assertEqual(response_dict2['user']['font_size'], response_dict1['user']['font_size'], "Font size doesn't match.")
self.assertEqual(response_dict2['user']['language'], response_dict1['user']['language'], "Language doesn't match.")
def test_case5(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "First login didn't work.")
response_dict1 = json.loads(response._container[0])
client = APIClient()
client.credentials(HTTP_GOOGLEID=google_id, HTTP_TOKEN=google_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "Second login didn't work.")
response_dict2 = json.loads(response._container[0])
self.assertEqual(response_dict2['user']['id'], response_dict1['user']['id'], "ID doesn't match.")
self.assertEqual(response_dict2['user']['facebook_id'], response_dict1['user']['facebook_id'], "Facebook id doesn't match.")
self.assertEqual(response_dict2['user']['email'], response_dict1['user']['email'], "Email doesn't match.")
self.assertEqual(response_dict2['user']['name'], response_dict1['user']['name'], "Name doesn't match.")
self.assertEqual(response_dict2['user']['font_size'], response_dict1['user']['font_size'], "Font size doesn't match.")
self.assertEqual(response_dict2['user']['language'], response_dict1['user']['language'], "Language doesn't match.")
def test_case6(self):
client = APIClient()
client.credentials(HTTP_GOOGLEID=google_id, HTTP_TOKEN=google_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "First login didn't work.")
response_dict1 = json.loads(response._container[0])
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "Second login didn't work.")
response_dict2 = json.loads(response._container[0])
self.assertEqual(response_dict2['user']['id'], response_dict1['user']['id'], "ID doesn't match.")
self.assertEqual(response_dict2['user']['facebook_id'], facebook_id, "Facebook id doesn't match.")
self.assertEqual(response_dict2['user']['google_id'], google_id, "Google id doesn't match.")
self.assertEqual(response_dict2['user']['email'], response_dict1['user']['email'], "Email doesn't match.")
self.assertEqual(response_dict2['user']['name'], response_dict1['user']['name'], "Name doesn't match.")
self.assertEqual(response_dict2['user']['font_size'], response_dict1['user']['font_size'], "Font size doesn't match.")
self.assertEqual(response_dict2['user']['language'], response_dict1['user']['language'], "Language doesn't match.")
def test_case7(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN='invalidtoken')
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertNotEqual(response.status_code, status.HTTP_200_OK, "Login shouldn't work.")
def test_case8(self):
client = APIClient()
client.credentials(HTTP_GOOGLEID=google_id, HTTP_TOKEN='invalidtoken')
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
self.assertNotEqual(response.status_code, status.HTTP_200_OK, "Login shouldn't work.")
def test_case9(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
insert_feed_publications()
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.get('/home/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/home/ didn't work.")
def test_case10(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.get('/solr/version', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/solr/version didn't work.")
def test_case11(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/favorite/create', {'article_id': 'S0102-76382014000300344scl'}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/favorite/create didn't work.")
def test_case12(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.get('/favorite/read', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/favorite/read didn't work.")
def test_case13(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/favorite/create', {'article_id': 'S0102-76382014000300344scl'}, format='json')
response = client.get('/favorite/read', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/favorite/read didn't work.")
def test_case14(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/favorite/create', {'article_id': 'S0102-76382014000300344scl'}, format='json')
response = client.post('/favorite/delete', {'article_id': 'S0102-76382014000300344scl'}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/favorite/delete didn't work.")
def test_case15(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
insert_feed_publications()
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.get('/feed/publications/list', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/feed/publications/list didn't work.")
def test_case16(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
insert_feed_publications()
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/preferences/feed/exclusion/create', {'feed_id': 1}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/preferences/feed/exclusion/create didn't work.")
def test_case17(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
insert_feed_publications()
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/preferences/feed/exclusion/create', {'feed_id': 1}, format='json')
response = client.post('/preferences/feed/exclusion/delete', {'feed_id': 1}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/preferences/feed/exclusion/delete didn't work.")
def test_case18(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
insert_feed_publications()
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/preferences/feed/publication/exclusion/create', {'feed_id': 1, 'publication_id': 1},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/preferences/feed/publication/exclusion/create didn't work.")
def test_case19(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
insert_feed_publications()
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/preferences/feed/publication/exclusion/create', {'feed_id': 1, 'publication_id': 1},
format='json')
response = client.post('/preferences/feed/publication/exclusion/delete', {'feed_id': 1, 'publication_id': 1},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK,
"/preferences/feed/publication/exclusion/delete didn't work.")
def test_case20(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
insert_feed_publications()
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/preferences/feed/publication/exclusion/all/create', {'feed_id': 1}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "preferences/feed/publication/exclusion/all/create didn't work.")
def test_case21(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
insert_feed_publications()
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/preferences/feed/publication/exclusion/all/create', {'feed_id': 1}, format='json')
response = client.post('/preferences/feed/publication/exclusion/all/delete', {'feed_id': 1}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/preferences/feed/publication/exclusion/all/delete didn't work.")
def test_case22(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/user/language', {'language': 'pt'}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, "/user/language didn't work.")
def test_case23(self):
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/login/', {'email': email, 'name': name, 'font_size': font_size, 'language': language}, format='json')
client = APIClient()
client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
response = client.post('/user/font', {'font_size': 'S'}, format='json')
self.assertEqual(0, Feed.objects.count(), "It should be zero.")
self.assertEqual(response.status_code, status.HTTP_200_OK, "/user/font didn't work.")
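# Editor's note (sketch): the login/credential boilerplate repeated in every
# test above could be factored into a helper along these lines:
#
#     def _logged_in_client():
#         client = APIClient()
#         client.credentials(HTTP_FACEBOOKID=facebook_id, HTTP_TOKEN=facebook_token)
#         client.post('/login/', {'email': email, 'name': name,
#                                 'font_size': font_size, 'language': language},
#                     format='json')
#         return client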
| bsd-2-clause |
varigit/VAR-SOM-AM33-Kernel-3-14 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
350dotorg/Django | django/utils/module_loading.py | 112 | 2190 | import imp
import os
import sys
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
name = ".".join([package.__name__, module_name])
if name in sys.modules:
return True
for finder in sys.meta_path:
if finder.find_module(name):
return True
for entry in package.__path__: # No __path__, then not a package.
try:
# Try the cached finder.
finder = sys.path_importer_cache[entry]
if finder is None:
# Implicit import machinery should be used.
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
continue
# Else see if the finder knows of a loader.
elif finder.find_module(name):
return True
else:
continue
except KeyError:
# No cached finder, so try and make one.
for hook in sys.path_hooks:
try:
finder = hook(entry)
# XXX Could cache in sys.path_importer_cache
if finder.find_module(name):
return True
else:
# Once a finder is found, stop the search.
break
except ImportError:
# Continue the search for a finder.
continue
else:
# No finder found.
# Try the implicit import machinery if searching a directory.
if os.path.isdir(entry):
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
pass
# XXX Could insert None or NullImporter
else:
# Exhausted the search, so the module cannot be found.
return False
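# Example usage (editor's addition):
#
#     import django.utils
#     module_has_submodule(django.utils, 'module_loading')  # -> True
#     module_has_submodule(django.utils, 'no_such_module')  # -> False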
| bsd-3-clause |
nightjean/Deep-Learning | tensorflow/tools/dist_test/python/mnist_replica.py | 57 | 10810 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed MNIST training and validation, with model replicas.
A simple softmax model with one hidden layer is defined. The parameters
(weights and biases) are located on two parameter servers (ps), while the
ops are defined on a worker node. The TF sessions also run on the worker
node.
Multiple invocations of this script can be done in parallel, with different
values for --task_index. There should be exactly one invocation with
--task_index=0, which will create a master session that carries out variable
initialization. The other, non-master, sessions will wait for the master
session to finish the initialization before proceeding to the training stage.
The coordination between the multiple worker invocations occurs due to
the definition of the parameters on the same ps devices. The parameter updates
from one worker is visible to all other workers. As such, the workers can
perform forward computation and gradient calculation in parallel, which
should lead to increased training speed for the simple model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
flags = tf.app.flags
flags.DEFINE_string("data_dir", "/tmp/mnist-data",
"Directory for storing mnist data")
flags.DEFINE_boolean("download_only", False,
"Only perform downloading of data; Do not proceed to "
"session preparation, model definition or training")
flags.DEFINE_integer("task_index", None,
"Worker task index, should be >= 0. task_index=0 is "
"the master worker task the performs the variable "
"initialization ")
flags.DEFINE_integer("num_gpus", 1,
"Total number of gpus for each machine."
"If you don't use GPU, please set it to '0'")
flags.DEFINE_integer("replicas_to_aggregate", None,
"Number of replicas to aggregate before parameter update"
"is applied (For sync_replicas mode only; default: "
"num_workers)")
flags.DEFINE_integer("hidden_units", 100,
"Number of units in the hidden layer of the NN")
flags.DEFINE_integer("train_steps", 200,
"Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_boolean("sync_replicas", False,
"Use the sync_replicas (synchronized replicas) mode, "
"wherein the parameter updates from workers are aggregated "
"before applied to avoid stale gradients")
flags.DEFINE_boolean(
"existing_servers", False, "Whether servers already exists. If True, "
"will use the worker hosts via their GRPC URLs (one client process "
"per worker host). Otherwise, will create an in-process TensorFlow "
"server.")
flags.DEFINE_string("ps_hosts","localhost:2222",
"Comma-separated list of hostname:port pairs")
flags.DEFINE_string("worker_hosts", "localhost:2223,localhost:2224",
"Comma-separated list of hostname:port pairs")
flags.DEFINE_string("job_name", None,"job name: worker or ps")
FLAGS = flags.FLAGS
IMAGE_PIXELS = 28
def main(unused_argv):
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
if FLAGS.download_only:
sys.exit(0)
if FLAGS.job_name is None or FLAGS.job_name == "":
raise ValueError("Must specify an explicit `job_name`")
if FLAGS.task_index is None or FLAGS.task_index == "":
raise ValueError("Must specify an explicit `task_index`")
print("job name = %s" % FLAGS.job_name)
print("task index = %d" % FLAGS.task_index)
# Construct the cluster and start the server.
ps_spec = FLAGS.ps_hosts.split(",")
worker_spec = FLAGS.worker_hosts.split(",")
# Get the number of workers.
num_workers = len(worker_spec)
cluster = tf.train.ClusterSpec({
"ps": ps_spec,
"worker": worker_spec})
if not FLAGS.existing_servers:
# Not using existing servers. Create an in-process server.
server = tf.train.Server(
cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
if FLAGS.job_name == "ps":
server.join()
is_chief = (FLAGS.task_index == 0)
if FLAGS.num_gpus > 0:
if FLAGS.num_gpus < num_workers:
raise ValueError("number of gpus is less than number of workers")
# Avoid gpu allocation conflict: now allocate task_num -> #gpu
# for each worker in the corresponding machine
gpu = (FLAGS.task_index % FLAGS.num_gpus)
worker_device = "/job:worker/task:%d/gpu:%d" % (FLAGS.task_index, gpu)
elif FLAGS.num_gpus == 0:
# Just allocate the CPU to worker server
cpu = 0
worker_device = "/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, cpu)
# The device setter will automatically place Variables ops on separate
# parameter servers (ps). The non-Variable ops will be placed on the workers.
# The ps use CPU and workers use corresponding GPU
with tf.device(
tf.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/cpu:0",
cluster=cluster)):
global_step = tf.Variable(0, name="global_step", trainable=False)
# Variables of the hidden layer
hid_w = tf.Variable(
tf.truncated_normal(
[IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS),
name="hid_w")
hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
# Variables of the softmax layer
sm_w = tf.Variable(
tf.truncated_normal(
[FLAGS.hidden_units, 10],
stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
# Ops: located on the worker specified with FLAGS.task_index
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
y_ = tf.placeholder(tf.float32, [None, 10])
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
if FLAGS.sync_replicas:
if FLAGS.replicas_to_aggregate is None:
replicas_to_aggregate = num_workers
else:
replicas_to_aggregate = FLAGS.replicas_to_aggregate
opt = tf.train.SyncReplicasOptimizer(
opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers,
name="mnist_sync_replicas")
train_step = opt.minimize(cross_entropy, global_step=global_step)
if FLAGS.sync_replicas:
local_init_op = opt.local_step_init_op
if is_chief:
local_init_op = opt.chief_init_op
ready_for_local_init_op = opt.ready_for_local_init_op
# Initial token and chief queue runners required by the sync_replicas mode
chief_queue_runner = opt.get_chief_queue_runner()
sync_init_op = opt.get_init_tokens_op()
init_op = tf.global_variables_initializer()
train_dir = tempfile.mkdtemp()
if FLAGS.sync_replicas:
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
recovery_wait_secs=1,
global_step=global_step)
else:
sv = tf.train.Supervisor(
is_chief=is_chief,
logdir=train_dir,
init_op=init_op,
recovery_wait_secs=1,
global_step=global_step)
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])
# The chief worker (task_index==0) session will prepare the session,
# while the remaining workers will wait for the preparation to complete.
if is_chief:
print("Worker %d: Initializing session..." % FLAGS.task_index)
else:
print("Worker %d: Waiting for session to be initialized..." %
FLAGS.task_index)
if FLAGS.existing_servers:
server_grpc_url = "grpc://" + worker_spec[FLAGS.task_index]
print("Using existing server at: %s" % server_grpc_url)
sess = sv.prepare_or_wait_for_session(server_grpc_url,
config=sess_config)
else:
sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
print("Worker %d: Session initialization complete." % FLAGS.task_index)
if FLAGS.sync_replicas and is_chief:
# Chief worker will start the chief queue runner and call the init op.
sess.run(sync_init_op)
sv.start_queue_runners(sess, [chief_queue_runner])
# Perform training
time_begin = time.time()
print("Training begins @ %f" % time_begin)
local_step = 0
while True:
# Training feed
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
train_feed = {x: batch_xs, y_: batch_ys}
_, step = sess.run([train_step, global_step], feed_dict=train_feed)
local_step += 1
now = time.time()
print("%f: Worker %d: training step %d done (global step: %d)" %
(now, FLAGS.task_index, local_step, step))
if step >= FLAGS.train_steps:
break
time_end = time.time()
print("Training ends @ %f" % time_end)
training_time = time_end - time_begin
print("Training elapsed time: %f s" % training_time)
# Validation feed
val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
val_xent = sess.run(cross_entropy, feed_dict=val_feed)
print("After %d training step(s), validation cross entropy = %g" %
(FLAGS.train_steps, val_xent))
if __name__ == "__main__":
tf.app.run()
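# Usage sketch (hypothetical): how a script with the flags referenced above is
# typically launched, one process per cluster member. The script name, hosts
# and ports are placeholders.
#
#   # parameter server
#   python dist_mnist.py --ps_hosts=10.0.0.1:2222 \
#       --worker_hosts=10.0.0.2:2222,10.0.0.3:2222 \
#       --job_name=ps --task_index=0
#   # chief worker (task_index 0); start one more process with --task_index=1
#   python dist_mnist.py --ps_hosts=10.0.0.1:2222 \
#       --worker_hosts=10.0.0.2:2222,10.0.0.3:2222 \
#       --job_name=worker --task_index=0 --num_gpus=1 --sync_replicas=True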
| apache-2.0 |
ryfeus/lambda-packs | Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/docutils/languages/pt_br.py | 148 | 1982 | # $Id: pt_br.py 5567 2008-06-03 01:11:03Z goodger $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Brazilian Portuguese-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': u'Autor',
'authors': u'Autores',
'organization': u'Organiza\u00E7\u00E3o',
'address': u'Endere\u00E7o',
'contact': u'Contato',
'version': u'Vers\u00E3o',
'revision': u'Revis\u00E3o',
'status': u'Estado',
'date': u'Data',
'copyright': u'Copyright',
'dedication': u'Dedicat\u00F3ria',
'abstract': u'Resumo',
'attention': u'Aten\u00E7\u00E3o!',
'caution': u'Cuidado!',
'danger': u'PERIGO!',
'error': u'Erro',
'hint': u'Sugest\u00E3o',
'important': u'Importante',
'note': u'Nota',
'tip': u'Dica',
'warning': u'Aviso',
'contents': u'Sum\u00E1rio'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
u'autor': 'author',
u'autores': 'authors',
u'organiza\u00E7\u00E3o': 'organization',
u'endere\u00E7o': 'address',
u'contato': 'contact',
u'vers\u00E3o': 'version',
u'revis\u00E3o': 'revision',
u'estado': 'status',
u'data': 'date',
u'copyright': 'copyright',
u'dedicat\u00F3ria': 'dedication',
u'resumo': 'abstract'}
"""Brazilian Portuguese (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| mit |
nwokeo/supysonic | supysonic/lastfm.py | 2 | 3288 | # coding: utf-8
# This file is part of Supysonic.
#
# Supysonic is a Python implementation of the Subsonic server API.
# Copyright (C) 2013 Alban 'spl0k' Féron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import requests, hashlib
from supysonic import config
class LastFm:
def __init__(self, user, logger):
self.__user = user
self.__api_key = config.get('lastfm', 'api_key')
self.__api_secret = config.get('lastfm', 'secret')
self.__enabled = self.__api_key is not None and self.__api_secret is not None
self.__logger = logger
def link_account(self, token):
if not self.__enabled:
return False, 'No API key set'
res = self.__api_request(False, method = 'auth.getSession', token = token)
if not res:
return False, 'Error connecting to LastFM'
elif 'error' in res:
return False, 'Error %i: %s' % (res['error'], res['message'])
else:
self.__user.lastfm_session = res['session']['key']
self.__user.lastfm_status = True
return True, 'OK'
def unlink_account(self):
self.__user.lastfm_session = None
self.__user.lastfm_status = True
def now_playing(self, track):
if not self.__enabled:
return
self.__api_request(True, method = 'track.updateNowPlaying', artist = track.album.artist.name, track = track.title, album = track.album.name,
trackNumber = track.number, duration = track.duration)
def scrobble(self, track, ts):
if not self.__enabled:
return
self.__api_request(True, method = 'track.scrobble', artist = track.album.artist.name, track = track.title, album = track.album.name,
timestamp = ts, trackNumber = track.number, duration = track.duration)
def __api_request(self, write, **kwargs):
if not self.__enabled:
return
if write:
if not self.__user.lastfm_session or not self.__user.lastfm_status:
return
kwargs['sk'] = self.__user.lastfm_session
kwargs['api_key'] = self.__api_key
sig_str = ''
for k, v in sorted(kwargs.iteritems()):
if type(v) is unicode:
sig_str += k + v.encode('utf-8')
else:
sig_str += k + str(v)
sig = hashlib.md5(sig_str + self.__api_secret).hexdigest()
kwargs['api_sig'] = sig
kwargs['format'] = 'json'
try:
if write:
r = requests.post('http://ws.audioscrobbler.com/2.0/', data = kwargs)
else:
r = requests.get('http://ws.audioscrobbler.com/2.0/', params = kwargs)
except requests.exceptions.RequestException, e:
self.__logger.warn('Error while connecting to LastFM: ' + str(e))
return None
json = r.json()
if 'error' in json:
if json['error'] in (9, '9'):
self.__user.lastfm_status = False
self.__logger.warn('LastFM error %i: %s' % (json['error'], json['message']))
return json
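# Usage sketch (hypothetical): `user`, `logger` and `track` are whatever the
# caller already has; `token` comes from Last.fm's web-auth callback.
#
#   import time
#   lastfm = LastFm(user, logger)
#   ok, message = lastfm.link_account(token)
#   if ok:
#       lastfm.now_playing(track)                  # "now playing" notification
#       lastfm.scrobble(track, int(time.time()))   # scrobble with a timestamp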
| agpl-3.0 |
ric2b/Vivaldi-browser | chromium/components/policy/tools/make_policy_zip.py | 1 | 2188 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a zip archive with policy template files.
"""
import optparse
import sys
import zipfile
def add_files_to_zip(zip_file, base_dir, file_list):
"""Pack a list of files into a zip archive, that is already opened for
writing.
Args:
zip_file: An object representing the zip archive.
base_dir: Base path of all the files in the real file system.
file_list: List of absolute file paths to add. Must start with base_dir.
The base_dir is stripped in the zip file entries.
"""
if (base_dir[-1] != '/'):
base_dir += '/'
for file_path in file_list:
assert file_path.startswith(base_dir)
zip_file.write(file_path, file_path[len(base_dir):])
return 0
def main(argv):
"""Pack a list of files into a zip archive.
Args:
output: The file path of the zip archive.
base_dir: Base path of input files.
languages: Comma-separated list of languages, e.g. en-US,de.
add: List of files to include in the archive. The language placeholder
${lang} is expanded into one file for each language.
"""
parser = optparse.OptionParser()
parser.add_option("--output", dest="output")
parser.add_option("--base_dir", dest="base_dir")
parser.add_option("--languages", dest="languages")
parser.add_option("--add", action="append", dest="files", default=[])
options, args = parser.parse_args(argv[1:])
# Process file list, possibly expanding language placeholders.
_LANG_PLACEHOLDER = "${lang}"
languages = filter(bool, options.languages.split(','))
file_list = []
for file_to_add in options.files:
if (_LANG_PLACEHOLDER in file_to_add):
for lang in languages:
file_list.append(file_to_add.replace(_LANG_PLACEHOLDER, lang))
else:
file_list.append(file_to_add)
zip_file = zipfile.ZipFile(options.output, 'w', zipfile.ZIP_DEFLATED)
try:
return add_files_to_zip(zip_file, options.base_dir, file_list)
finally:
zip_file.close()
if '__main__' == __name__:
sys.exit(main(sys.argv))
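# Usage sketch (hypothetical paths): ${lang} in an --add entry is expanded once
# per entry in --languages, and every added file must live under --base_dir.
#
#   python make_policy_zip.py --output policy_templates.zip \
#       --base_dir out/policy --languages en-US,de \
#       --add out/policy/common/chrome.adm \
#       --add 'out/policy/${lang}/chrome.adml'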
| bsd-3-clause |
fabianp/scikit-learn | sklearn/externals/joblib/logger.py | 359 | 5135 | """
Helpers for logging.
This module needs much love to become useful.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2008 Gael Varoquaux
# License: BSD Style, 3 clauses.
from __future__ import print_function
import time
import sys
import os
import shutil
import logging
import pprint
from .disk import mkdirp
def _squeeze_time(t):
"""Remove .1s to the time under Windows: this is the time it take to
stat files. This is needed to make results similar to timings under
Unix, for tests
"""
if sys.platform.startswith('win'):
return max(0, t - .1)
else:
return t
def format_time(t):
t = _squeeze_time(t)
return "%.1fs, %.1fmin" % (t, t / 60.)
def short_format_time(t):
t = _squeeze_time(t)
if t > 60:
return "%4.1fmin" % (t / 60.)
else:
return " %5.1fs" % (t)
def pformat(obj, indent=0, depth=3):
if 'numpy' in sys.modules:
import numpy as np
print_options = np.get_printoptions()
np.set_printoptions(precision=6, threshold=64, edgeitems=1)
else:
print_options = None
out = pprint.pformat(obj, depth=depth, indent=indent)
if print_options:
np.set_printoptions(**print_options)
return out
###############################################################################
# class `Logger`
###############################################################################
class Logger(object):
""" Base class for logging messages.
"""
def __init__(self, depth=3):
"""
Parameters
----------
depth: int, optional
The depth of objects printed.
"""
self.depth = depth
def warn(self, msg):
logging.warn("[%s]: %s" % (self, msg))
def debug(self, msg):
# XXX: This conflicts with the debug flag used in children class
logging.debug("[%s]: %s" % (self, msg))
def format(self, obj, indent=0):
""" Return the formated representation of the object.
"""
return pformat(obj, indent=indent, depth=self.depth)
###############################################################################
# class `PrintTime`
###############################################################################
class PrintTime(object):
""" Print and log messages while keeping track of time.
"""
def __init__(self, logfile=None, logdir=None):
if logfile is not None and logdir is not None:
raise ValueError('Cannot specify both logfile and logdir')
# XXX: Need argument docstring
self.last_time = time.time()
self.start_time = self.last_time
if logdir is not None:
logfile = os.path.join(logdir, 'joblib.log')
self.logfile = logfile
if logfile is not None:
mkdirp(os.path.dirname(logfile))
if os.path.exists(logfile):
# Rotate the logs
for i in range(1, 9):
try:
shutil.move(logfile + '.%i' % i,
logfile + '.%i' % (i + 1))
except:
"No reason failing here"
# Use a copy rather than a move, so that a process
# monitoring this file does not get lost.
try:
shutil.copy(logfile, logfile + '.1')
except:
"No reason failing here"
try:
with open(logfile, 'w') as logfile:
logfile.write('\nLogging joblib python script\n')
logfile.write('\n---%s---\n' % time.ctime(self.last_time))
except:
""" Multiprocessing writing to files can create race
conditions. Rather fail silently than crash the
computation.
"""
# XXX: We actually need a debug flag to disable this
# silent failure.
def __call__(self, msg='', total=False):
""" Print the time elapsed between the last call and the current
call, with an optional message.
"""
if not total:
time_lapse = time.time() - self.last_time
full_msg = "%s: %s" % (msg, format_time(time_lapse))
else:
# FIXME: Too much logic duplicated
time_lapse = time.time() - self.start_time
full_msg = "%s: %.2fs, %.1f min" % (msg, time_lapse,
time_lapse / 60)
print(full_msg, file=sys.stderr)
if self.logfile is not None:
try:
with open(self.logfile, 'a') as f:
print(full_msg, file=f)
except:
""" Multiprocessing writing to files can create race
conditions. Rather fail silently than crash the
calculation.
"""
# XXX: We actually need a debug flag to disable this
# silent failure.
self.last_time = time.time()
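# Usage sketch (hypothetical): each call to a PrintTime instance reports the
# time elapsed since the previous call (and appends to the log file, if any).
#
#   print_time = PrintTime(logfile='/tmp/joblib.log')  # path is illustrative
#   do_some_work()
#   print_time('work done')            # e.g. "work done: 2.3s, 0.0min"
#   print_time('total', total=True)    # elapsed since construction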
| bsd-3-clause |
gamecredits-project/gmc-dev | qa/rpc-tests/httpbasics.py | 148 | 4755 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework import BitcoinTestFramework
from util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, extra_args=[['-rpckeepalive=1'], ['-rpckeepalive=0'], [], []])
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urlparse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + base64.b64encode(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read();
assert_equal('"error":null' in out1, True)
assert_equal(conn.sock!=None, True) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out2 = conn.getresponse().read();
assert_equal('"error":null' in out1, True) #must also response with a correct json-rpc message
assert_equal(conn.sock!=None, True) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + base64.b64encode(authpair), "Connection": "keep-alive"}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read();
assert_equal('"error":null' in out1, True)
assert_equal(conn.sock!=None, True) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out2 = conn.getresponse().read();
assert_equal('"error":null' in out1, True) #must also response with a correct json-rpc message
assert_equal(conn.sock!=None, True) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + base64.b64encode(authpair), "Connection":"close"}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read();
assert_equal('"error":null' in out1, True)
assert_equal(conn.sock!=None, False) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urlparse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + base64.b64encode(authpair)}
conn = httplib.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read();
assert_equal('"error":null' in out1, True)
assert_equal(conn.sock!=None, False) #connection must be closed because keep-alive was set to false
        #node2 (third node) is running with standard keep-alive parameters, which means keep-alive is on
urlNode2 = urlparse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + base64.b64encode(authpair)}
conn = httplib.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read();
assert_equal('"error":null' in out1, True)
        assert_equal(conn.sock!=None, True) #connection must still be open because bitcoind should use keep-alive by default
if __name__ == '__main__':
HTTPBasicsTest ().main ()
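# Illustrative sketch of the pattern the assertions above rely on: after a
# response is read on an HTTP/1.1 connection, conn.sock stays non-None while
# the server keeps the connection alive, and becomes None once it closes it.
#
#   conn = httplib.HTTPConnection(host, port)
#   conn.connect()
#   conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
#   conn.getresponse().read()
#   still_open = conn.sock is not None   # True under keep-alive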
| mit |
cscott/wikiserver | whoosh/qparser/default.py | 21 | 16570 | # Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import sys
from whoosh import query
from whoosh.qparser import syntax
from whoosh.qparser.common import print_debug, QueryParserError
# Query parser object
class QueryParser(object):
"""A hand-written query parser built on modular plug-ins. The default
configuration implements a powerful fielded query language similar to
Lucene's.
You can use the ``plugins`` argument when creating the object to override
the default list of plug-ins, and/or use ``add_plugin()`` and/or
``remove_plugin_class()`` to change the plug-ins included in the parser.
>>> from whoosh import qparser
>>> parser = qparser.QueryParser("content", schema)
>>> parser.remove_plugin_class(qparser.WildcardPlugin)
>>> parser.add_plugin(qparser.PrefixPlugin())
>>> parser.parse(u"hello there")
And([Term("content", u"hello"), Term("content", u"there")])
"""
def __init__(self, fieldname, schema, plugins=None, termclass=query.Term,
phraseclass=query.Phrase, group=syntax.AndGroup):
"""
:param fieldname: the default field -- the parser uses this as the
field for any terms without an explicit field.
:param schema: a :class:`whoosh.fields.Schema` object to use when
parsing. The appropriate fields in the schema will be used to
tokenize terms/phrases before they are turned into query objects.
You can specify None for the schema to create a parser that does
not analyze the text of the query, usually for testing purposes.
:param plugins: a list of plugins to use. WhitespacePlugin is
automatically included, do not put it in this list. This overrides
the default list of plugins. Classes in the list will be
automatically instantiated.
:param termclass: the query class to use for individual search terms.
The default is :class:`whoosh.query.Term`.
:param phraseclass: the query class to use for phrases. The default
is :class:`whoosh.query.Phrase`.
:param group: the default grouping. ``AndGroup`` makes terms required
by default. ``OrGroup`` makes terms optional by default.
"""
self.fieldname = fieldname
self.schema = schema
self.termclass = termclass
self.phraseclass = phraseclass
self.group = group
self.plugins = []
if not plugins:
plugins = self.default_set()
self._add_ws_plugin()
self.add_plugins(plugins)
def default_set(self):
"""Returns the default list of plugins to use.
"""
from whoosh.qparser import plugins
return [plugins.WhitespacePlugin(),
plugins.SingleQuotePlugin(),
plugins.FieldsPlugin(),
plugins.WildcardPlugin(),
plugins.PhrasePlugin(),
plugins.RangePlugin(),
plugins.GroupPlugin(),
plugins.OperatorsPlugin(),
plugins.BoostPlugin(),
plugins.EveryPlugin(),
]
def add_plugins(self, pins):
"""Adds the given list of plugins to the list of plugins in this
parser.
"""
for pin in pins:
self.add_plugin(pin)
def add_plugin(self, pin):
"""Adds the given plugin to the list of plugins in this parser.
"""
if isinstance(pin, type):
pin = pin()
self.plugins.append(pin)
def _add_ws_plugin(self):
from whoosh.qparser.plugins import WhitespacePlugin
self.add_plugin(WhitespacePlugin())
def remove_plugin(self, pi):
"""Removes the given plugin object from the list of plugins in this
parser.
"""
self.plugins.remove(pi)
def remove_plugin_class(self, cls):
"""Removes any plugins of the given class from this parser.
"""
self.plugins = [pi for pi in self.plugins if not isinstance(pi, cls)]
def replace_plugin(self, plugin):
"""Removes any plugins of the class of the given plugin and then adds
it. This is a convenience method to keep from having to call
``remove_plugin_class`` followed by ``add_plugin`` each time you want
to reconfigure a default plugin.
>>> qp = qparser.QueryParser("content", schema)
>>> qp.replace_plugin(qparser.NotPlugin("(^| )-"))
"""
self.remove_plugin_class(plugin.__class__)
self.add_plugin(plugin)
def _priorized(self, methodname):
# methodname is "taggers" or "filters". Returns a priorized list of
# tagger objects or filter functions.
items_and_priorities = []
for plugin in self.plugins:
# Call either .taggers() or .filters() on the plugin
method = getattr(plugin, methodname)
for item in method(self):
items_and_priorities.append(item)
# Sort the list by priority (lower priority runs first)
items_and_priorities.sort(key=lambda x: x[1])
# Return the sorted list without the priorities
return [item for item, _ in items_and_priorities]
def multitoken_query(self, spec, texts, fieldname, termclass, boost):
"""Returns a query for multiple texts. This method implements the
intention specified in the field's ``multitoken_query`` attribute,
which specifies what to do when strings that look like single terms
to the parser turn out to yield multiple tokens when analyzed.
:param spec: a string describing how to join the text strings into a
query. This is usually the value of the field's
``multitoken_query`` attribute.
:param texts: a list of token strings.
:param fieldname: the name of the field.
:param termclass: the query class to use for single terms.
:param boost: the original term's boost in the query string, should be
applied to the returned query object.
"""
spec = spec.lower()
if spec == "first":
# Throw away all but the first token
return termclass(fieldname, texts[0], boost=boost)
elif spec == "phrase":
# Turn the token into a phrase
return self.phraseclass(fieldname, texts, boost=boost)
else:
if spec == "default":
qclass = self.group.qclass
elif spec == "and":
qclass = query.And
elif spec == "or":
qclass = query.Or
else:
raise QueryParserError("Unknown multitoken_query value %r"
% spec)
return qclass([termclass(fieldname, t, boost=boost)
for t in texts])
def term_query(self, fieldname, text, termclass, boost=1.0, tokenize=True,
removestops=True):
"""Returns the appropriate query object for a single term in the query
string.
"""
if self.schema and fieldname in self.schema:
field = self.schema[fieldname]
# If this field type wants to parse queries itself, let it do so
# and return early
if field.self_parsing():
try:
return field.parse_query(fieldname, text, boost=boost)
except:
e = sys.exc_info()[1]
return query.error_query(e)
# Otherwise, ask the field to process the text into a list of
# tokenized strings
texts = list(field.process_text(text, mode="query",
tokenize=tokenize,
removestops=removestops))
# If the analyzer returned more than one token, use the field's
# multitoken_query attribute to decide what query class, if any, to
# use to put the tokens together
if len(texts) > 1:
return self.multitoken_query(field.multitoken_query, texts,
fieldname, termclass, boost)
# It's possible field.process_text() will return an empty list (for
# example, on a stop word)
if not texts:
return None
text = texts[0]
return termclass(fieldname, text, boost=boost)
def taggers(self):
"""Returns a priorized list of tagger objects provided by the parser's
currently configured plugins.
"""
return self._priorized("taggers")
def filters(self):
"""Returns a priorized list of filter functions provided by the
parser's currently configured plugins.
"""
return self._priorized("filters")
def tag(self, text, pos=0, debug=False):
"""Returns a group of syntax nodes corresponding to the given text,
created by matching the Taggers provided by the parser's plugins.
:param text: the text to tag.
:param pos: the position in the text to start tagging at.
"""
# The list out output tags
stack = []
# End position of the previous match
prev = pos
# Priorized list of taggers provided by the parser's plugins
taggers = self.taggers()
print_debug(debug, "Taggers: %r" % taggers)
# Define a function that will make a WordNode from the "interstitial"
# text between matches
def inter(startchar, endchar):
n = syntax.WordNode(text[startchar:endchar])
n.startchar = startchar
n.endchar = endchar
return n
while pos < len(text):
node = None
# Try each tagger to see if it matches at the current position
for tagger in taggers:
node = tagger.match(self, text, pos)
if node:
if node.endchar <= pos:
raise Exception("Token %r did not move cursor forward."
" (%r, %s)" % (tagger, text, pos))
if prev < pos:
tween = inter(prev, pos)
print_debug(debug, "Tween: %r" % tween)
stack.append(tween)
print_debug(debug, "Tagger: %r at %s: %r"
% (tagger, pos, node))
stack.append(node)
prev = pos = node.endchar
break
if not node:
# No taggers matched, move forward
pos += 1
# If there's unmatched text left over on the end, put it in a WordNode
if prev < len(text):
stack.append(inter(prev, len(text)))
# Wrap the list of nodes in a group node
group = self.group(stack)
print_debug(debug, "Tagged group: %r" % group)
return group
def filterize(self, nodes, debug=False):
"""Takes a group of nodes and runs the filters provided by the parser's
plugins.
"""
# Call each filter in the priorized list of plugin filters
print_debug(debug, "Pre-filtered group: %r" % nodes)
for f in self.filters():
print_debug(debug, "..Applying: %r" % f)
nodes = f(self, nodes)
print_debug(debug, "..Result: %r" % nodes)
if nodes is None:
raise Exception("Filter %r did not return anything" % f)
return nodes
def process(self, text, pos=0, debug=False):
"""Returns a group of syntax nodes corresponding to the given text,
tagged by the plugin Taggers and filtered by the plugin filters.
:param text: the text to tag.
:param pos: the position in the text to start tagging at.
"""
nodes = self.tag(text, pos=pos, debug=debug)
nodes = self.filterize(nodes, debug=debug)
return nodes
def parse(self, text, normalize=True, debug=False):
"""Parses the input string and returns a :class:`whoosh.query.Query`
object/tree.
:param text: the string to parse.
:param normalize: whether to call normalize() on the query object/tree
before returning it. This should be left on unless you're trying to
debug the parser output.
:rtype: :class:`whoosh.query.Query`
"""
nodes = self.process(text, debug=debug)
print_debug(debug, "Syntax tree: %r" % nodes)
q = nodes.query(self)
if not q:
q = query.NullQuery
print_debug(debug, "Pre-normalized query: %r" % q)
if normalize:
q = q.normalize()
print_debug(debug, "Normalized query: %r" % q)
return q
def parse_(self, text, normalize=True):
pass
# Premade parser configurations
def MultifieldParser(fieldnames, schema, fieldboosts=None, **kwargs):
"""Returns a QueryParser configured to search in multiple fields.
Instead of assigning unfielded clauses to a default field, this parser
transforms them into an OR clause that searches a list of fields. For
example, if the list of multi-fields is "f1", "f2" and the query string is
"hello there", the class will parse "(f1:hello OR f2:hello) (f1:there OR
f2:there)". This is very useful when you have two textual fields (e.g.
"title" and "content") you want to search by default.
:param fieldnames: a list of field names to search.
:param fieldboosts: an optional dictionary mapping field names to boosts.
"""
from whoosh.qparser.plugins import MultifieldPlugin
p = QueryParser(None, schema, **kwargs)
mfp = MultifieldPlugin(fieldnames, fieldboosts=fieldboosts)
p.add_plugin(mfp)
return p
def SimpleParser(fieldname, schema, **kwargs):
"""Returns a QueryParser configured to support only +, -, and phrase
syntax.
"""
from whoosh.qparser import plugins
pins = [plugins.WhitespacePlugin,
plugins.PlusMinusPlugin,
plugins.PhrasePlugin]
return QueryParser(fieldname, schema, plugins=pins, **kwargs)
def DisMaxParser(fieldboosts, schema, tiebreak=0.0, **kwargs):
"""Returns a QueryParser configured to support only +, -, and phrase
syntax, and which converts individual terms into DisjunctionMax queries
across a set of fields.
:param fieldboosts: a dictionary mapping field names to boosts.
"""
from whoosh.qparser import plugins
mfp = plugins.MultifieldPlugin(list(fieldboosts.keys()),
fieldboosts=fieldboosts,
group=syntax.DisMaxGroup)
pins = [plugins.WhitespacePlugin,
plugins.PlusMinusPlugin,
plugins.PhrasePlugin,
mfp]
return QueryParser(None, schema, plugins=pins, **kwargs)
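# Usage sketch (hypothetical schema and field names): the premade
# configurations above are used like this.
#
#   parser = MultifieldParser(["title", "content"], schema,
#                             fieldboosts={"title": 2.0, "content": 1.0})
#   q = parser.parse(u"hello there")    # each term ORed across both fields
#
#   simple = SimpleParser("content", schema)
#   q = simple.parse(u'+required -excluded "a phrase"')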
| gpl-2.0 |
ptisserand/ansible | lib/ansible/modules/network/avi/avi_seproperties.py | 41 | 3649 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_seproperties
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SeProperties Avi RESTful Object
description:
- This module is used to configure SeProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
se_agent_properties:
description:
- Seagentproperties settings for seproperties.
se_bootup_properties:
description:
- Sebootupproperties settings for seproperties.
se_runtime_properties:
description:
- Seruntimeproperties settings for seproperties.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
- Default value when not specified in API or module is interpreted by Avi Controller as default.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SeProperties object
avi_seproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_seproperties
"""
RETURN = '''
obj:
description: SeProperties (api/seproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
se_agent_properties=dict(type='dict',),
se_bootup_properties=dict(type='dict',),
se_runtime_properties=dict(type='dict',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'seproperties',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
seann1/portfolio5 | .meteor/dev_bundle/python/Lib/test/test_shlex.py | 179 | 5300 | # -*- coding: iso-8859-1 -*-
import unittest
import shlex
from test import test_support
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# The original test data set was from shellwords, by Hartmut Goebel.
data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|\|x|bar|
\ x bar|\|x|bar|
\ bar|\|bar|
foo \x bar|foo|\|x|bar|
foo \ x bar|foo|\|x|bar|
foo \ bar|foo|\|bar|
foo "bar" bla|foo|"bar"|bla|
"foo" "bar" "bla"|"foo"|"bar"|"bla"|
"foo" bar "bla"|"foo"|bar|"bla"|
"foo" bar bla|"foo"|bar|bla|
foo 'bar' bla|foo|'bar'|bla|
'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
'foo' bar 'bla'|'foo'|bar|'bla'|
'foo' bar bla|'foo'|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
""|""|
''|''|
foo "" bar|foo|""|bar|
foo '' bar|foo|''|bar|
foo "" "" "" bar|foo|""|""|""|bar|
foo '' '' '' bar|foo|''|''|''|bar|
\""|\|""|
"\"|"\"|
"foo\ bar"|"foo\ bar"|
"foo\\ bar"|"foo\\ bar"|
"foo\\ bar\"|"foo\\ bar\"|
"foo\\" bar\""|"foo\\"|bar|\|""|
"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
\''|\|''|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
\"foo"|\|"foo"|
\"foo"\x|\|"foo"|\|x|
"foo\x"|"foo\x"|
"foo\ "|"foo\ "|
foo\ xx|foo|\|xx|
foo\ x\x|foo|\|x|\|x|
foo\ x\x\""|foo|\|x|\|x|\|""|
"foo\ x\x"|"foo\ x\x"|
"foo\ x\x\\"|"foo\ x\x\\"|
"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
foo\ bar|foo|\|bar|
foo#bar\nbaz|foobaz|
:-) ;-)|:|-|)|;|-|)|
áéíóú|á|é|í|ó|ú|
"""
posix_data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
foo#bar\nbaz|foo|baz|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""
class ShlexTest(unittest.TestCase):
def setUp(self):
self.data = [x.split("|")[:-1]
for x in data.splitlines()]
self.posix_data = [x.split("|")[:-1]
for x in posix_data.splitlines()]
for item in self.data:
item[0] = item[0].replace(r"\n", "\n")
for item in self.posix_data:
item[0] = item[0].replace(r"\n", "\n")
def splitTest(self, data, comments):
for i in range(len(data)):
l = shlex.split(data[i][0], comments=comments)
self.assertEqual(l, data[i][1:],
"%s: %s != %s" %
(data[i][0], l, data[i][1:]))
def oldSplit(self, s):
ret = []
lex = shlex.shlex(StringIO(s))
tok = lex.get_token()
while tok:
ret.append(tok)
tok = lex.get_token()
return ret
def testSplitPosix(self):
"""Test data splitting with posix parser"""
self.splitTest(self.posix_data, comments=True)
def testCompat(self):
"""Test compatibility interface"""
for i in range(len(self.data)):
l = self.oldSplit(self.data[i][0])
self.assertEqual(l, self.data[i][1:],
"%s: %s != %s" %
(self.data[i][0], l, self.data[i][1:]))
# Allow this test to be used with old shlex.py
if not getattr(shlex, "split", None):
for methname in dir(ShlexTest):
if methname.startswith("test") and methname != "testCompat":
delattr(ShlexTest, methname)
def test_main():
test_support.run_unittest(ShlexTest)
if __name__ == "__main__":
test_main()
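# Illustrative examples of the behaviour the data tables above encode:
#
#   >>> shlex.split(r'"foo bar" baz\ qux')     # POSIX rules (the default)
#   ['foo bar', 'baz qux']
#   >>> shlex.split('foo#bar\nbaz', comments=True)
#   ['foo', 'baz']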
| gpl-2.0 |
Solewer/vino-cave | vinocave/settings.py | 1 | 3114 | """
Django settings for vinocave project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vj#grw^2zlpbh^w&0aed2ac_q51p_s#kiw-f*x6(^u_xy_f-jk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vinocave.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vinocave.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
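# Sketch (hypothetical variable names): one common way to address the
# SECRET_KEY/DEBUG warnings above in production is to read them from the
# environment instead of hard-coding them.
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
#   ALLOWED_HOSTS = [h for h in
#                    os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',') if h]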
| mit |
Dave667/service | plugin.video.pokerstars.tv/html5lib/treewalkers/__init__.py | 133 | 2416 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are "simpletree", "dom", "etree" and "beautifulsoup"
"simpletree" - a built-in DOM-ish tree type with support for some
more pythonic idioms.
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"beautifulsoup" - Beautiful soup (if installed)
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom", "simpletree"):
mod = __import__(treeType, globals())
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "beautifulsoup":
import soup
treeWalkerCache[treeType] = soup.TreeWalker
elif treeType == "lxml":
import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
import etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
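# Usage sketch (assuming html5lib's parser is available): a walker class is
# fetched once, then instantiated per tree to get a token stream.
#
#   import html5lib
#   tree = html5lib.parse("<p>hello</p>", treebuilder="etree")
#   walker = getTreeWalker("etree")
#   for token in walker(tree):    # tokens can feed e.g. html5lib's serializer
#       pass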
| gpl-2.0 |
inovtec-solutions/OpenERP | openerp/addons/account_asset/__init__.py | 446 | 1135 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset
import account_asset_invoice
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hachreak/invenio-previewer | tests/test_utils.py | 2 | 2890 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test of utilities module."""
from __future__ import absolute_import, print_function
import pytest
from mock import patch
from six import BytesIO
from invenio_previewer import current_previewer
from invenio_previewer.utils import detect_encoding
def test_default_file_reader(app, record_with_file, testfile):
"""Test view by default."""
file_ = current_previewer.record_file_factory(
None, record_with_file, testfile.key)
assert file_.version_id == testfile.version_id
@pytest.mark.parametrize('string, confidence, encoding, detect', [
(u'Γκρήκ Στρίνγκ'.encode('utf-8'), 0.99000, 'UTF-8', 'UTF-8'),
(u'dhǾk: kjd köd, ddȪj@dd.k'.encode('utf-8'), 0.87625, 'UTF-8', None),
(u'क्या हाल तुम या कर रहे हो?'.encode('utf-8'), 0.99000, 'UTF-8', 'UTF-8'),
(u'石原氏 移転は「既定路線」'.encode('euc-jp'), 0.46666, 'EUC-JP', None),
(u'Hi bye sigh die'.encode('utf-8'), 1.00000, 'UTF-8', 'UTF-8'),
(u'Monkey donkey cow crow'.encode('euc-jp'), 0.00000, 'ASCII', None),
(u'Monkey donkey cow crow'.encode('euc-jp'), 0.90000, 'EUC-JP', None),
(u'Monkey donkey cow crow'.encode('euc-jp'), 0.90001, 'EUC-JP', 'EUC-JP'),
(u'Monkey donkey cow crow'.encode('euc-jp'), 0.50000, 'UTF-8', None),
])
def test_detect_encoding(app, string, confidence, encoding, detect):
"""Test encoding detection."""
f = BytesIO(string)
initial_position = f.tell()
with patch('cchardet.detect') as mock_detect:
mock_detect.return_value = {'encoding': encoding,
'confidence': confidence}
assert detect_encoding(f) is detect
assert f.tell() == initial_position
def test_detect_encoding_exception(app):
f = BytesIO(u'Γκρήκ Στρίνγκ'.encode('utf-8'))
with patch('cchardet.detect', Exception):
assert detect_encoding(f) is None
| gpl-2.0 |
dagwieers/ansible | lib/ansible/modules/network/avi/avi_gslbapplicationpersistenceprofile.py | 31 | 3686 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslbapplicationpersistenceprofile
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of GslbApplicationPersistenceProfile Avi RESTful Object
description:
- This module is used to configure GslbApplicationPersistenceProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- Field introduced in 17.1.1.
name:
description:
- A user-friendly name for the persistence profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the persistence profile.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create GslbApplicationPersistenceProfile object
avi_gslbapplicationpersistenceprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbapplicationpersistenceprofile
"""
RETURN = '''
obj:
description: GslbApplicationPersistenceProfile (api/gslbapplicationpersistenceprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'gslbapplicationpersistenceprofile',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
wearpants/osf.io | api_tests/nodes/views/test_node_exceptions.py | 30 | 5135 | # -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory
)
class TestExceptionFormatting(ApiTestCase):
def setUp(self):
super(TestExceptionFormatting, self).setUp()
self.user = AuthUserFactory()
self.non_contrib = AuthUserFactory()
self.title = 'Cool Project'
self.description = 'A Properly Cool Project'
self.category = 'data'
self.project_no_title = {
'data': {
'attributes': {
'description': self.description,
'category': self.category,
'type': 'nodes',
}
}
}
self.private_project = ProjectFactory(is_public=False, creator=self.user)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id)
def test_creates_project_with_no_title_formatting(self):
url = '/{}nodes/'.format(API_BASE)
res = self.app.post_json_api(url, self.project_no_title, auth=self.user.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(res.json['errors'][0]['source'], {'pointer': '/data/attributes/title'})
assert_equal(res.json['errors'][0]['detail'], 'This field is required.')
def test_node_does_not_exist_formatting(self):
url = '/{}nodes/{}/'.format(API_BASE, '12345')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(errors[0], {'detail': 'Not found.'})
def test_forbidden_formatting(self):
res = self.app.get(self.private_url, auth=self.non_contrib.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(errors[0], {'detail': 'You do not have permission to perform this action.'})
def test_not_authorized_formatting(self):
res = self.app.get(self.private_url, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(errors[0], {'detail': "Authentication credentials were not provided."})
def test_update_project_with_no_title_or_category_formatting(self):
res = self.app.put_json_api(self.private_url, {'data': {'type': 'nodes', 'id': self.private_project._id, 'attributes': {'description': 'New description'}}}, auth=self.user.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(len(errors), 2)
errors = res.json['errors']
assert_items_equal([errors[0]['source'], errors[1]['source']],
[{'pointer': '/data/attributes/category'}, {'pointer': '/data/attributes/title'}])
assert_items_equal([errors[0]['detail'], errors[1]['detail']],
['This field is required.', 'This field is required.'])
def test_create_node_link_no_target_formatting(self):
url = self.private_url + 'node_links/'
res = self.app.post_json_api(url, {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': '',
'type': 'nodes',
}
}
}
}
}, auth=self.user.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source'], {'pointer': '/data/id'})
assert_equal(res.json['errors'][0]['detail'], 'This field may not be blank.')
def test_node_link_already_exists(self):
url = self.private_url + 'node_links/'
res = self.app.post_json_api(url, {
'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.public_project._id,
'type': 'nodes',
}
}
}
}
}, auth=self.user.auth)
assert_equal(res.status_code, 201)
res = self.app.post_json_api(url, {'data': {
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'id': self.public_project._id,
'type': 'nodes'
}
}
}
}}, auth=self.user.auth, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(res.status_code, 400)
assert(self.public_project._id in res.json['errors'][0]['detail'])
| apache-2.0 |
garbled1/ansible | lib/ansible/modules/cloud/rackspace/rax_clb_nodes.py | 14 | 8227 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_clb_nodes
short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
description:
- Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
version_added: "1.4"
options:
address:
required: false
description:
- IP address or domain name of the node
condition:
required: false
choices:
- enabled
- disabled
- draining
description:
- Condition for the node, which determines its role within the load
balancer
load_balancer_id:
required: true
description:
- Load balancer id
node_id:
required: false
description:
- Node id
port:
required: false
description:
- Port number of the load balanced service on the node
state:
required: false
default: "present"
choices:
- present
- absent
description:
- Indicate desired state of the node
type:
required: false
choices:
- primary
- secondary
description:
- Type of node
wait:
required: false
default: "no"
choices:
- "yes"
- "no"
description:
- Wait for the load balancer to become active before returning
wait_timeout:
required: false
default: 30
description:
- How long to wait before giving up and returning an error
weight:
required: false
description:
- Weight of node
author: "Lukasz Kawczynski (@neuroid)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
# Add a new node to the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
address: 10.2.2.3
port: 80
condition: enabled
type: primary
wait: yes
credentials: /path/to/credentials
# Drain connections from a node
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
condition: draining
wait: yes
credentials: /path/to/credentials
# Remove a node from the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
state: absent
wait: yes
credentials: /path/to/credentials
'''
import os
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
def _activate_virtualenv(path):
activate_this = os.path.join(path, 'bin', 'activate_this.py')
with open(activate_this) as f:
code = compile(f.read(), activate_this, 'exec')
exec(code)
def _get_node(lb, node_id=None, address=None, port=None):
"""Return a matching node"""
for node in getattr(lb, 'nodes', []):
match_list = []
if node_id is not None:
match_list.append(getattr(node, 'id', None) == node_id)
if address is not None:
match_list.append(getattr(node, 'address', None) == address)
if port is not None:
match_list.append(getattr(node, 'port', None) == port)
if match_list and all(match_list):
return node
return None
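# Illustrative sketch (not part of the upstream module): _get_node() ANDs
# together every criterion that is supplied, so with stub objects such as
#
#     class _Stub(object):
#         def __init__(self, **kw):
#             self.__dict__.update(kw)
#
#     lb = _Stub(nodes=[_Stub(id=410, address='10.2.2.3', port=80)])
#
# _get_node(lb, address='10.2.2.3', port=80) returns that node, while
# _get_node(lb, address='10.2.2.3', port=443) returns None because one of
# the supplied criteria fails to match.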
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
address=dict(),
condition=dict(choices=['enabled', 'disabled', 'draining']),
load_balancer_id=dict(required=True, type='int'),
node_id=dict(type='int'),
port=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
type=dict(choices=['primary', 'secondary']),
virtualenv=dict(type='path'),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=30, type='int'),
weight=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
address = module.params['address']
condition = (module.params['condition'] and
module.params['condition'].upper())
load_balancer_id = module.params['load_balancer_id']
node_id = module.params['node_id']
port = module.params['port']
state = module.params['state']
typ = module.params['type'] and module.params['type'].upper()
virtualenv = module.params['virtualenv']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout'] or 1
weight = module.params['weight']
if virtualenv:
try:
_activate_virtualenv(virtualenv)
except IOError as e:
module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
virtualenv, e))
setup_rax_module(module, pyrax)
if not pyrax.cloud_loadbalancers:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
try:
lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
except pyrax.exc.PyraxException as e:
module.fail_json(msg='%s' % e.message)
node = _get_node(lb, node_id, address, port)
result = rax_clb_node_to_dict(node)
if state == 'absent':
if not node: # Removing a non-existent node
module.exit_json(changed=False, state=state)
try:
lb.delete_node(node)
result = {}
except pyrax.exc.NotFound:
module.exit_json(changed=False, state=state)
except pyrax.exc.PyraxException as e:
module.fail_json(msg='%s' % e.message)
else: # present
if not node:
if node_id: # Updating a non-existent node
msg = 'Node %d not found' % node_id
if lb.nodes:
msg += (' (available nodes: %s)' %
', '.join([str(x.id) for x in lb.nodes]))
module.fail_json(msg=msg)
else: # Creating a new node
try:
node = pyrax.cloudloadbalancers.Node(
address=address, port=port, condition=condition,
weight=weight, type=typ)
resp, body = lb.add_nodes([node])
result.update(body['nodes'][0])
except pyrax.exc.PyraxException as e:
module.fail_json(msg='%s' % e.message)
else: # Updating an existing node
mutable = {
'condition': condition,
'type': typ,
'weight': weight,
}
            # Iterate over a snapshot so that pruning unchanged attributes
            # does not mutate the dict mid-iteration (a RuntimeError on
            # Python 3).
            for name, value in list(mutable.items()):
                if value is None or value == getattr(node, name):
                    mutable.pop(name)
if not mutable:
module.exit_json(changed=False, state=state, node=result)
try:
# The diff has to be set explicitly to update node's weight and
# type; this should probably be fixed in pyrax
lb.update_node(node, diff=mutable)
result.update(mutable)
except pyrax.exc.PyraxException as e:
module.fail_json(msg='%s' % e.message)
if wait:
pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
attempts=wait_timeout)
if lb.status != 'ACTIVE':
module.fail_json(
msg='Load balancer not active after %ds (current status: %s)' %
(wait_timeout, lb.status.lower()))
kwargs = {'node': result} if result else {}
module.exit_json(changed=True, state=state, **kwargs)
if __name__ == '__main__':
main()
| gpl-3.0 |
ylow/SFrame | oss_src/unity/python/sframe/util/lockfile/pidlockfile.py | 488 | 6221 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import os
import sys
import errno
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
""" Lockfile implemented as a Unix PID file.
The lock file is a normal file named by the attribute `path`.
A lock's PID file contains a single line of text, containing
the process ID (PID) of the process that acquired the lock.
>>> lock = PIDLockFile('somefile')
>>> lock = PIDLockFile('somefile')
"""
def __init__(self, path, threaded=False, timeout=None):
# pid lockfiles don't support threaded operation, so always force
# False as the threaded arg.
LockBase.__init__(self, path, False, timeout)
dirname = os.path.dirname(self.lock_file)
basename = os.path.split(self.path)[-1]
self.unique_name = self.path
def read_pid(self):
""" Get the PID from the lock file.
"""
return read_pid_from_pidfile(self.path)
def is_locked(self):
""" Test if the lock is currently held.
The lock is held if the PID file for this lock exists.
"""
return os.path.exists(self.path)
def i_am_locking(self):
""" Test if the lock is held by the current process.
Returns ``True`` if the current process ID matches the
number stored in the PID file.
"""
return self.is_locked() and os.getpid() == self.read_pid()
def acquire(self, timeout=None):
""" Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
"""
        # Fall back to the instance default only when no timeout was given;
        # the old and/or idiom silently discarded an explicit timeout of 0.
        timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
try:
write_pid_to_pidfile(self.path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# The lock creation failed. Maybe sleep a bit.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout is not None and timeout/10 or 0.1)
else:
raise LockFailed("failed to create %s" % self.path)
else:
return
def release(self):
""" Release the lock.
Removes the PID file to release the lock, or raises an
error if the current process does not hold the lock.
"""
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
remove_existing_pidfile(self.path)
def break_lock(self):
""" Break an existing lock.
Removes the PID file if it already exists, otherwise does
nothing.
"""
remove_existing_pidfile(self.path)
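# Illustrative sketch (not part of the upstream library): a typical
# acquire/release cycle with the class above; LockTimeout and AlreadyLocked
# are raised from acquire() as shown in the implementation.
#
#     lock = PIDLockFile('/var/run/mydaemon.pid', timeout=5)
#     lock.acquire()            # writes our PID, or raises LockTimeout
#     try:
#         pass                  # ... exclusive work ...
#     finally:
#         lock.release()        # removes the PID file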
def read_pid_from_pidfile(pidfile_path):
""" Read the PID recorded in the named PID file.
Read and return the numeric PID recorded as text in the named
PID file. If the PID file cannot be read, or if the content is
not a valid PID, return ``None``.
"""
pid = None
try:
pidfile = open(pidfile_path, 'r')
except IOError:
pass
else:
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character.
#
# Programs that read PID files should be somewhat flexible
# in what they accept; i.e., they should ignore extra
# whitespace, leading zeroes, absence of the trailing
# newline, or additional lines in the PID file.
line = pidfile.readline().strip()
try:
pid = int(line)
except ValueError:
pass
pidfile.close()
return pid
def write_pid_to_pidfile(pidfile_path):
""" Write the PID in the named PID file.
Get the numeric process ID (“PID”) of the current process
and write it to the named file as a line of text.
"""
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
open_mode = 0o644
pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
pidfile = os.fdopen(pidfile_fd, 'w')
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. For
# example, if crond was process number 25, /var/run/crond.pid
# would contain three characters: two, five, and newline.
pid = os.getpid()
line = "%(pid)d\n" % vars()
pidfile.write(line)
pidfile.close()
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
| bsd-3-clause |
jvillacorta/mitmproxy | test/netlib/http/http1/test_read.py | 2 | 11189 | from __future__ import absolute_import, print_function, division
from io import BytesIO
from mock import Mock
import pytest
from netlib.exceptions import HttpException, HttpSyntaxException, HttpReadDisconnect, TcpDisconnect
from netlib.http import Headers
from netlib.http.http1.read import (
read_request, read_response, read_request_head,
read_response_head, read_body, connection_close, expected_http_body_size, _get_first_line,
_read_request_line, _parse_authority_form, _read_response_line, _check_http_version,
_read_headers, _read_chunked, get_header_tokens
)
from netlib.tutils import treq, tresp, raises
from netlib import exceptions
def test_get_header_tokens():
headers = Headers()
assert get_header_tokens(headers, "foo") == []
headers["foo"] = "bar"
assert get_header_tokens(headers, "foo") == ["bar"]
headers["foo"] = "bar, voing"
assert get_header_tokens(headers, "foo") == ["bar", "voing"]
headers.set_all("foo", ["bar, voing", "oink"])
assert get_header_tokens(headers, "foo") == ["bar", "voing", "oink"]
@pytest.mark.parametrize("input", [
b"GET / HTTP/1.1\r\n\r\nskip",
b"GET / HTTP/1.1\r\n\r\nskip",
b"GET / HTTP/1.1\r\n\r\nskip",
b"GET / HTTP/1.1 \r\n\r\nskip",
])
def test_read_request(input):
rfile = BytesIO(input)
r = read_request(rfile)
assert r.method == "GET"
assert r.content == b""
assert r.http_version == "HTTP/1.1"
assert r.timestamp_end
assert rfile.read() == b"skip"
@pytest.mark.parametrize("input", [
b"CONNECT :0 0",
])
def test_read_request_error(input):
rfile = BytesIO(input)
raises(exceptions.HttpException, read_request, rfile)
def test_read_request_head():
rfile = BytesIO(
b"GET / HTTP/1.1\r\n"
b"Content-Length: 4\r\n"
b"\r\n"
b"skip"
)
rfile.reset_timestamps = Mock()
rfile.first_byte_timestamp = 42
r = read_request_head(rfile)
assert r.method == "GET"
assert r.headers["Content-Length"] == "4"
assert r.content is None
assert rfile.reset_timestamps.called
assert r.timestamp_start == 42
assert rfile.read() == b"skip"
@pytest.mark.parametrize("input", [
b"HTTP/1.1 418 I'm a teapot\r\n\r\nbody",
b"HTTP/1.1 418 I'm a teapot\r\n\r\nbody",
b"HTTP/1.1 418 I'm a teapot\r\n\r\nbody",
b"HTTP/1.1 418 I'm a teapot \r\n\r\nbody",
])
def test_read_response(input):
req = treq()
rfile = BytesIO(input)
r = read_response(rfile, req)
assert r.http_version == "HTTP/1.1"
assert r.status_code == 418
assert r.reason == "I'm a teapot"
assert r.content == b"body"
assert r.timestamp_end
def test_read_response_head():
rfile = BytesIO(
b"HTTP/1.1 418 I'm a teapot\r\n"
b"Content-Length: 4\r\n"
b"\r\n"
b"skip"
)
rfile.reset_timestamps = Mock()
rfile.first_byte_timestamp = 42
r = read_response_head(rfile)
assert r.status_code == 418
assert r.headers["Content-Length"] == "4"
assert r.content is None
assert rfile.reset_timestamps.called
assert r.timestamp_start == 42
assert rfile.read() == b"skip"
class TestReadBody(object):
def test_chunked(self):
rfile = BytesIO(b"3\r\nfoo\r\n0\r\n\r\nbar")
body = b"".join(read_body(rfile, None))
assert body == b"foo"
assert rfile.read() == b"bar"
def test_known_size(self):
rfile = BytesIO(b"foobar")
body = b"".join(read_body(rfile, 3))
assert body == b"foo"
assert rfile.read() == b"bar"
def test_known_size_limit(self):
rfile = BytesIO(b"foobar")
with raises(HttpException):
b"".join(read_body(rfile, 3, 2))
def test_known_size_too_short(self):
rfile = BytesIO(b"foo")
with raises(HttpException):
b"".join(read_body(rfile, 6))
def test_unknown_size(self):
rfile = BytesIO(b"foobar")
body = b"".join(read_body(rfile, -1))
assert body == b"foobar"
def test_unknown_size_limit(self):
rfile = BytesIO(b"foobar")
with raises(HttpException):
b"".join(read_body(rfile, -1, 3))
def test_max_chunk_size(self):
rfile = BytesIO(b"123456")
assert list(read_body(rfile, -1, max_chunk_size=None)) == [b"123456"]
rfile = BytesIO(b"123456")
assert list(read_body(rfile, -1, max_chunk_size=1)) == [b"1", b"2", b"3", b"4", b"5", b"6"]
def test_connection_close():
headers = Headers()
assert connection_close(b"HTTP/1.0", headers)
assert not connection_close(b"HTTP/1.1", headers)
headers["connection"] = "keep-alive"
assert not connection_close(b"HTTP/1.1", headers)
headers["connection"] = "close"
assert connection_close(b"HTTP/1.1", headers)
headers["connection"] = "foobar"
assert connection_close(b"HTTP/1.0", headers)
assert not connection_close(b"HTTP/1.1", headers)
def test_expected_http_body_size():
# Expect: 100-continue
assert expected_http_body_size(
treq(headers=Headers(expect="100-continue", content_length="42"))
) == 0
# http://tools.ietf.org/html/rfc7230#section-3.3
assert expected_http_body_size(
treq(method=b"HEAD"),
tresp(headers=Headers(content_length="42"))
) == 0
assert expected_http_body_size(
treq(method=b"CONNECT"),
tresp()
) == 0
for code in (100, 204, 304):
assert expected_http_body_size(
treq(),
tresp(status_code=code)
) == 0
# chunked
assert expected_http_body_size(
treq(headers=Headers(transfer_encoding="chunked")),
) is None
# explicit length
for val in (b"foo", b"-7"):
with raises(HttpSyntaxException):
expected_http_body_size(
treq(headers=Headers(content_length=val))
)
assert expected_http_body_size(
treq(headers=Headers(content_length="42"))
) == 42
# no length
assert expected_http_body_size(
treq(headers=Headers())
) == 0
assert expected_http_body_size(
treq(headers=Headers()), tresp(headers=Headers())
) == -1
def test_get_first_line():
rfile = BytesIO(b"foo\r\nbar")
assert _get_first_line(rfile) == b"foo"
rfile = BytesIO(b"\r\nfoo\r\nbar")
assert _get_first_line(rfile) == b"foo"
with raises(HttpReadDisconnect):
rfile = BytesIO(b"")
_get_first_line(rfile)
with raises(HttpReadDisconnect):
rfile = Mock()
rfile.readline.side_effect = TcpDisconnect
_get_first_line(rfile)
def test_read_request_line():
def t(b):
return _read_request_line(BytesIO(b))
assert (t(b"GET / HTTP/1.1") ==
("relative", b"GET", None, None, None, b"/", b"HTTP/1.1"))
assert (t(b"OPTIONS * HTTP/1.1") ==
("relative", b"OPTIONS", None, None, None, b"*", b"HTTP/1.1"))
assert (t(b"CONNECT foo:42 HTTP/1.1") ==
("authority", b"CONNECT", None, b"foo", 42, None, b"HTTP/1.1"))
assert (t(b"GET http://foo:42/bar HTTP/1.1") ==
("absolute", b"GET", b"http", b"foo", 42, b"/bar", b"HTTP/1.1"))
with raises(HttpSyntaxException):
t(b"GET / WTF/1.1")
with raises(HttpSyntaxException):
t(b"this is not http")
with raises(HttpReadDisconnect):
t(b"")
def test_parse_authority_form():
assert _parse_authority_form(b"foo:42") == (b"foo", 42)
with raises(HttpSyntaxException):
_parse_authority_form(b"foo")
with raises(HttpSyntaxException):
_parse_authority_form(b"foo:bar")
with raises(HttpSyntaxException):
_parse_authority_form(b"foo:99999999")
with raises(HttpSyntaxException):
_parse_authority_form(b"f\x00oo:80")
def test_read_response_line():
def t(b):
return _read_response_line(BytesIO(b))
assert t(b"HTTP/1.1 200 OK") == (b"HTTP/1.1", 200, b"OK")
assert t(b"HTTP/1.1 200") == (b"HTTP/1.1", 200, b"")
# https://github.com/mitmproxy/mitmproxy/issues/784
assert t(b"HTTP/1.1 200 Non-Autoris\xc3\xa9") == (b"HTTP/1.1", 200, b"Non-Autoris\xc3\xa9")
with raises(HttpSyntaxException):
assert t(b"HTTP/1.1")
with raises(HttpSyntaxException):
t(b"HTTP/1.1 OK OK")
with raises(HttpSyntaxException):
t(b"WTF/1.1 200 OK")
with raises(HttpReadDisconnect):
t(b"")
def test_check_http_version():
_check_http_version(b"HTTP/0.9")
_check_http_version(b"HTTP/1.0")
_check_http_version(b"HTTP/1.1")
_check_http_version(b"HTTP/2.0")
with raises(HttpSyntaxException):
_check_http_version(b"WTF/1.0")
with raises(HttpSyntaxException):
_check_http_version(b"HTTP/1.10")
with raises(HttpSyntaxException):
_check_http_version(b"HTTP/1.b")
class TestReadHeaders(object):
@staticmethod
def _read(data):
return _read_headers(BytesIO(data))
def test_read_simple(self):
data = (
b"Header: one\r\n"
b"Header2: two\r\n"
b"\r\n"
)
headers = self._read(data)
assert headers.fields == ((b"Header", b"one"), (b"Header2", b"two"))
def test_read_multi(self):
data = (
b"Header: one\r\n"
b"Header: two\r\n"
b"\r\n"
)
headers = self._read(data)
assert headers.fields == ((b"Header", b"one"), (b"Header", b"two"))
def test_read_continued(self):
data = (
b"Header: one\r\n"
b"\ttwo\r\n"
b"Header2: three\r\n"
b"\r\n"
)
headers = self._read(data)
assert headers.fields == ((b"Header", b"one\r\n two"), (b"Header2", b"three"))
def test_read_continued_err(self):
data = b"\tfoo: bar\r\n"
with raises(HttpSyntaxException):
self._read(data)
def test_read_err(self):
data = b"foo"
with raises(HttpSyntaxException):
self._read(data)
def test_read_empty_name(self):
data = b":foo"
with raises(HttpSyntaxException):
self._read(data)
def test_read_empty_value(self):
data = b"bar:"
headers = self._read(data)
assert headers.fields == ((b"bar", b""),)
def test_read_chunked():
req = treq(content=None)
req.headers["Transfer-Encoding"] = "chunked"
data = b"1\r\na\r\n0\r\n"
with raises(HttpSyntaxException):
b"".join(_read_chunked(BytesIO(data)))
data = b"1\r\na\r\n0\r\n\r\n"
assert b"".join(_read_chunked(BytesIO(data))) == b"a"
data = b"\r\n\r\n1\r\na\r\n1\r\nb\r\n0\r\n\r\n"
assert b"".join(_read_chunked(BytesIO(data))) == b"ab"
data = b"\r\n"
with raises("closed prematurely"):
b"".join(_read_chunked(BytesIO(data)))
data = b"1\r\nfoo"
with raises("malformed chunked body"):
b"".join(_read_chunked(BytesIO(data)))
data = b"foo\r\nfoo"
with raises(HttpSyntaxException):
b"".join(_read_chunked(BytesIO(data)))
data = b"5\r\naaaaa\r\n0\r\n\r\n"
with raises("too large"):
b"".join(_read_chunked(BytesIO(data), limit=2))
| mit |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_internal/operations/freeze.py | 5 | 9279 | from __future__ import absolute_import
import collections
import logging
import os
import re
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.pkg_resources import RequirementParseError
from pip._internal.exceptions import BadCommand, InstallationError
from pip._internal.req.constructors import (
install_req_from_editable, install_req_from_line,
)
from pip._internal.req.req_file import COMMENT_RE
from pip._internal.utils.misc import (
dist_is_editable, get_installed_distributions,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import (
Iterator, Optional, List, Container, Set, Dict, Tuple, Iterable, Union
)
from pip._internal.cache import WheelCache
from pip._vendor.pkg_resources import (
Distribution, Requirement
)
RequirementInfo = Tuple[Optional[Union[str, Requirement]], bool, List[str]]
logger = logging.getLogger(__name__)
def freeze(
requirement=None, # type: Optional[List[str]]
find_links=None, # type: Optional[List[str]]
local_only=None, # type: Optional[bool]
user_only=None, # type: Optional[bool]
skip_regex=None, # type: Optional[str]
isolated=False, # type: bool
wheel_cache=None, # type: Optional[WheelCache]
exclude_editable=False, # type: bool
skip=() # type: Container[str]
):
# type: (...) -> Iterator[str]
find_links = find_links or []
skip_match = None
if skip_regex:
skip_match = re.compile(skip_regex).search
for link in find_links:
yield '-f %s' % link
installations = {} # type: Dict[str, FrozenRequirement]
for dist in get_installed_distributions(local_only=local_only,
skip=(),
user_only=user_only):
try:
req = FrozenRequirement.from_dist(dist)
except RequirementParseError:
logger.warning(
"Could not parse requirement: %s",
dist.project_name
)
continue
if exclude_editable and req.editable:
continue
installations[req.name] = req
if requirement:
# the options that don't get turned into an InstallRequirement
# should only be emitted once, even if the same option is in multiple
# requirements files, so we need to keep track of what has been emitted
# so that we don't emit it again if it's seen again
emitted_options = set() # type: Set[str]
# keep track of which files a requirement is in so that we can
# give an accurate warning if a requirement appears multiple times.
req_files = collections.defaultdict(list) # type: Dict[str, List[str]]
for req_file_path in requirement:
with open(req_file_path) as req_file:
for line in req_file:
if (not line.strip() or
line.strip().startswith('#') or
(skip_match and skip_match(line)) or
line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--pre',
'--trusted-host',
'--process-dependency-links',
'--extra-index-url'))):
line = line.rstrip()
if line not in emitted_options:
emitted_options.add(line)
yield line
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = install_req_from_editable(
line,
isolated=isolated,
wheel_cache=wheel_cache,
)
else:
line_req = install_req_from_line(
COMMENT_RE.sub('', line).strip(),
isolated=isolated,
wheel_cache=wheel_cache,
)
if not line_req.name:
logger.info(
"Skipping line in requirement file [%s] because "
"it's not clear what it would install: %s",
req_file_path, line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
elif line_req.name not in installations:
# either it's not installed, or it is installed
# but has been processed already
if not req_files[line_req.name]:
logger.warning(
"Requirement file [%s] contains %s, but "
"package %r is not installed",
req_file_path,
COMMENT_RE.sub('', line).strip(), line_req.name
)
else:
req_files[line_req.name].append(req_file_path)
else:
yield str(installations[line_req.name]).rstrip()
del installations[line_req.name]
req_files[line_req.name].append(req_file_path)
# Warn about requirements that were included multiple times (in a
# single requirements file or in different requirements files).
for name, files in six.iteritems(req_files):
if len(files) > 1:
logger.warning("Requirement %s included multiple times [%s]",
name, ', '.join(sorted(set(files))))
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
if canonicalize_name(installation.name) not in skip:
yield str(installation).rstrip()
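# Illustrative sketch (not part of pip itself): freeze() is a generator of
# requirement lines, so a hypothetical caller mirroring `pip freeze` output
# would simply do:
#
#     for line in freeze(local_only=True, skip=('pip', 'setuptools')):
#         print(line)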
def get_requirement_info(dist):
# type: (Distribution) -> RequirementInfo
"""
Compute and return values (req, editable, comments) for use in
FrozenRequirement.from_dist().
"""
if not dist_is_editable(dist):
return (None, False, [])
location = os.path.normcase(os.path.abspath(dist.location))
from pip._internal.vcs import vcs, RemoteNotFoundError
vc_type = vcs.get_backend_type(location)
if not vc_type:
req = dist.as_requirement()
        logger.debug(
            'No VCS found for editable requirement %r in: %r', req,
            location,
        )
comments = [
'# Editable install with no version control ({})'.format(req)
]
return (location, True, comments)
try:
req = vc_type.get_src_requirement(location, dist.project_name)
except RemoteNotFoundError:
req = dist.as_requirement()
comments = [
'# Editable {} install with no remote ({})'.format(
vc_type.__name__, req,
)
]
return (location, True, comments)
except BadCommand:
logger.warning(
'cannot determine version of editable source in %s '
'(%s command not found in path)',
location,
vc_type.name,
)
return (None, True, [])
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
else:
if req is not None:
return (req, True, [])
logger.warning(
'Could not determine repository location of %s', location
)
comments = ['## !! Could not determine repository location']
return (None, False, comments)
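# Illustrative sketch (not part of pip): the (req, editable, comments) tuple
# above drives FrozenRequirement below. An editable git checkout typically
# yields something like ('git+https://host/proj@sha#egg=proj', True, []) --
# the URL here is hypothetical -- while a regular, non-editable install
# yields (None, False, []) and from_dist() falls back to
# dist.as_requirement().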
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
# type: (str, Union[str, Requirement], bool, Iterable[str]) -> None
self.name = name
self.req = req
self.editable = editable
self.comments = comments
@classmethod
def from_dist(cls, dist):
# type: (Distribution) -> FrozenRequirement
req, editable, comments = get_requirement_info(dist)
if req is None:
req = dist.as_requirement()
return cls(dist.project_name, req, editable, comments=comments)
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
| apache-2.0 |
tml/pyxer | src/pyxer/paster/__init__.py | 2 | 2810 | # -*- coding: UTF-8 -*-
#############################################
## (C)opyright by Dirk Holtwick, 2008 ##
## All rights reserved ##
#############################################
from paste import httpserver
from pyxer.app import make_app
#def serve(opt):
# if opt.reload:
# import paste.reloader as reloader
# reloader.install()
# httpserver.serve(make_app(), host=opt.host, port=int(opt.port))
import pyxer
import os
import os.path
import sys
from shutil import *
from pyxer.app import make_app
from pyxer.utils import call_virtual, find_name, find_root, call_script
_paster_ini = """
[app:main]
use = egg:pyxer#main
[server:main]
use = egg:Paste#http
host = 127.0.0.1
port = 8080
# Logging configuration
[loggers]
keys = root, app
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = DEBUG
handlers = console
[logger_app]
level = DEBUG
handlers =
qualname = app
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(asctime)s,%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
""".lstrip()
_setup_py = """
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name = "pyxer-project",
version = "",
description = "",
license = "",
author = "",
author_email = "",
keywords = "pyxer",
requires = ["pyxer"],
packages = find_packages(exclude=['ez_setup']),
include_package_data = True,
long_description = '',
classifiers = [x.strip() for x in ''.strip().splitlines()],
entry_points='''
[paste.app_factory]
main = pyxer.app:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
''',
)
""".lstrip()
def system(cmd):
cmd = cmd.strip()
print "Command:", cmd
return os.system(cmd)
def setup(opt):
# Create paster.ini
if not os.path.isfile("paster.ini"): #opt.update or
print "create paster.ini"
open("paster.ini", "w").write(_paster_ini)
def serve(opt, options = [], daemon = ""):
root = find_root()
command = "serve"
# setup(opt)
if daemon:
options.append("--daemon")
if opt.debug:
options.append("-v")
if opt.reload:
options.append("--reload")
ini = os.path.join(root, "development.ini")
call_script(["paster", command] + options + [ini, daemon])
def start(opt):
serve(opt, options = ["--daemon"])
def stop(opt):
serve(opt, options = ["--stop-daemon"])
def status(opt):
serve(opt, options = ["--status"])
| mit |
pbrady/sympy | sympy/solvers/polysys.py | 103 | 9370 | """Solvers of systems of polynomial equations. """
from __future__ import print_function, division
from sympy.core import S
from sympy.polys import Poly, groebner, roots
from sympy.polys.polytools import parallel_poly_from_expr
from sympy.polys.polyerrors import (ComputationFailed,
PolificationFailed, CoercionFailed)
from sympy.simplify import rcollect
from sympy.utilities import default_sort_key, postfixes
class SolveFailed(Exception):
"""Raised when solver's conditions weren't met. """
def solve_poly_system(seq, *gens, **args):
"""
Solve a system of polynomial equations.
Examples
========
>>> from sympy import solve_poly_system
>>> from sympy.abc import x, y
>>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y)
[(0, 0), (2, -sqrt(2)), (2, sqrt(2))]
"""
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('solve_poly_system', len(seq), exc)
if len(polys) == len(opt.gens) == 2:
f, g = polys
a, b = f.degree_list()
c, d = g.degree_list()
if a <= 2 and b <= 2 and c <= 2 and d <= 2:
try:
return solve_biquadratic(f, g, opt)
except SolveFailed:
pass
return solve_generic(polys, opt)
def solve_biquadratic(f, g, opt):
"""Solve a system of two bivariate quadratic polynomial equations.
Examples
========
>>> from sympy.polys import Options, Poly
>>> from sympy.abc import x, y
>>> from sympy.solvers.polysys import solve_biquadratic
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ')
>>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(1/3, 3), (41/27, 11/9)]
>>> a = Poly(y + x**2 - 3, y, x, domain='ZZ')
>>> b = Poly(-y + x - 4, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(-sqrt(29)/2 + 7/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + \
sqrt(29)/2)]
"""
G = groebner([f, g])
if len(G) == 1 and G[0].is_ground:
return None
if len(G) != 2:
raise SolveFailed
p, q = G
x, y = opt.gens
p = Poly(p, x, expand=False)
q = q.ltrim(-1)
p_roots = [ rcollect(expr, y) for expr in roots(p).keys() ]
q_roots = list(roots(q).keys())
solutions = []
for q_root in q_roots:
for p_root in p_roots:
solution = (p_root.subs(y, q_root), q_root)
solutions.append(solution)
return sorted(solutions, key=default_sort_key)
def solve_generic(polys, opt):
"""
Solve a generic system of polynomial equations.
Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
set F = { f_1, f_2, ..., f_n } of polynomial equations, using
Groebner basis approach. For now only zero-dimensional systems
are supported, which means F can have at most a finite number
of solutions.
The algorithm works by the fact that, supposing G is the basis
of F with respect to an elimination order (here lexicographic
order is used), G and F generate the same ideal, they have the
same set of solutions. By the elimination property, if G is a
reduced, zero-dimensional Groebner basis, then there exists an
univariate polynomial in G (in its last variable). This can be
solved by computing its roots. Substituting all computed roots
for the last (eliminated) variable in other elements of G, new
polynomial system is generated. Applying the above procedure
recursively, a finite number of solutions can be found.
The ability of finding all solutions by this procedure depends
on the root finding algorithms. If no solutions were found, it
means only that roots() failed, but the system is solvable. To
overcome this difficulty use numerical algorithms instead.
References
==========
.. [Buchberger01] B. Buchberger, Groebner Bases: A Short
Introduction for Systems Theorists, In: R. Moreno-Diaz,
B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
February, 2001
.. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
and Algorithms, Springer, Second Edition, 1997, pp. 112
Examples
========
>>> from sympy.polys import Poly, Options
>>> from sympy.solvers.polysys import solve_generic
>>> from sympy.abc import x, y
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(x - y + 5, x, y, domain='ZZ')
>>> b = Poly(x + y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(-1, 4)]
>>> a = Poly(x - 2*y + 5, x, y, domain='ZZ')
>>> b = Poly(2*x - y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(11/3, 13/3)]
>>> a = Poly(x**2 + y, x, y, domain='ZZ')
>>> b = Poly(x + y*4, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(0, 0), (1/4, -1/16)]
"""
def _is_univariate(f):
"""Returns True if 'f' is univariate in its last variable. """
for monom in f.monoms():
if any(m > 0 for m in monom[:-1]):
return False
return True
def _subs_root(f, gen, zero):
"""Replace generator with a root so that the result is nice. """
p = f.as_expr({gen: zero})
if f.degree(gen) >= 2:
p = p.expand(deep=False)
return p
def _solve_reduced_system(system, gens, entry=False):
"""Recursively solves reduced polynomial systems. """
if len(system) == len(gens) == 1:
zeros = list(roots(system[0], gens[-1]).keys())
return [ (zero,) for zero in zeros ]
basis = groebner(system, gens, polys=True)
if len(basis) == 1 and basis[0].is_ground:
if not entry:
return []
else:
return None
univariate = list(filter(_is_univariate, basis))
if len(univariate) == 1:
f = univariate.pop()
else:
raise NotImplementedError("only zero-dimensional systems supported (finite number of solutions)")
gens = f.gens
gen = gens[-1]
zeros = list(roots(f.ltrim(gen)).keys())
if not zeros:
return []
if len(basis) == 1:
return [ (zero,) for zero in zeros ]
solutions = []
for zero in zeros:
new_system = []
new_gens = gens[:-1]
for b in basis[:-1]:
eq = _subs_root(b, gen, zero)
if eq is not S.Zero:
new_system.append(eq)
for solution in _solve_reduced_system(new_system, new_gens):
solutions.append(solution + (zero,))
return solutions
try:
result = _solve_reduced_system(polys, opt.gens, entry=True)
except CoercionFailed:
raise NotImplementedError
if result is not None:
return sorted(result, key=default_sort_key)
else:
return None
def solve_triangulated(polys, *gens, **args):
"""
Solve a polynomial system using Gianni-Kalkbrenner algorithm.
The algorithm proceeds by computing one Groebner basis in the ground
domain and then by iteratively computing polynomial factorizations in
appropriately constructed algebraic extensions of the ground domain.
Examples
========
>>> from sympy.solvers.polysys import solve_triangulated
>>> from sympy.abc import x, y, z
>>> F = [x**2 + y + z - 1, x + y**2 + z - 1, x + y + z**2 - 1]
>>> solve_triangulated(F, x, y, z)
[(0, 0, 1), (0, 1, 0), (1, 0, 0)]
References
==========
1. Patrizia Gianni, Teo Mora, Algebraic Solution of System of
Polynomial Equations using Groebner Bases, AAECC-5 on Applied Algebra,
Algebraic Algorithms and Error-Correcting Codes, LNCS 356 247--257, 1989
"""
G = groebner(polys, gens, polys=True)
G = list(reversed(G))
domain = args.get('domain')
if domain is not None:
for i, g in enumerate(G):
G[i] = g.set_domain(domain)
f, G = G[0].ltrim(-1), G[1:]
dom = f.get_domain()
zeros = f.ground_roots()
solutions = set([])
for zero in zeros:
solutions.add(((zero,), dom))
var_seq = reversed(gens[:-1])
vars_seq = postfixes(gens[1:])
for var, vars in zip(var_seq, vars_seq):
_solutions = set([])
for values, dom in solutions:
H, mapping = [], list(zip(vars, values))
for g in G:
_vars = (var,) + vars
if g.has_only_gens(*_vars) and g.degree(var) != 0:
h = g.ltrim(var).eval(dict(mapping))
if g.degree(var) == h.degree():
H.append(h)
p = min(H, key=lambda h: h.degree())
zeros = p.ground_roots()
for zero in zeros:
if not zero.is_Rational:
dom_zero = dom.algebraic_field(zero)
else:
dom_zero = dom
_solutions.add(((zero,) + values, dom_zero))
solutions = _solutions
solutions = list(solutions)
for i, (solution, _) in enumerate(solutions):
solutions[i] = solution
return sorted(solutions, key=default_sort_key)
| bsd-3-clause |
Poles/Poles | platforms/linux/JsonCpp/scons-local-2.3.0/SCons/Tool/tex.py | 11 | 39943 | """SCons.Tool.tex
Tool-specific initialization for TeX.
Generates .dvi files from .tex files
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tex.py 2013/03/03 09:48:35 garyo"
import os.path
import re
import shutil
import sys
import platform
import glob
import SCons.Action
import SCons.Node
import SCons.Node.FS
import SCons.Util
import SCons.Scanner.LaTeX
Verbose = False
must_rerun_latex = True
# these are files that just need to be checked for changes and then rerun latex
check_suffixes = ['.toc', '.lof', '.lot', '.out', '.nav', '.snm']
# these are files that require bibtex or makeindex to be run when they change
all_suffixes = check_suffixes + ['.bbl', '.idx', '.nlo', '.glo', '.acn', '.bcf']
#
# regular expressions used to search for Latex features
# or outputs that require rerunning latex
#
# search for all .aux files opened by latex (recorded in the .fls file)
openout_aux_re = re.compile(r"OUTPUT *(.*\.aux)")
# search for all .bcf files opened by latex (recorded in the .fls file)
# for use by biber
openout_bcf_re = re.compile(r"OUTPUT *(.*\.bcf)")
#printindex_re = re.compile(r"^[^%]*\\printindex", re.MULTILINE)
#printnomenclature_re = re.compile(r"^[^%]*\\printnomenclature", re.MULTILINE)
#printglossary_re = re.compile(r"^[^%]*\\printglossary", re.MULTILINE)
# search to find rerun warnings
warning_rerun_str = '(^LaTeX Warning:.*Rerun)|(^Package \w+ Warning:.*Rerun)'
warning_rerun_re = re.compile(warning_rerun_str, re.MULTILINE)
# search to find citation rerun warnings
rerun_citations_str = "^LaTeX Warning:.*\n.*Rerun to get citations correct"
rerun_citations_re = re.compile(rerun_citations_str, re.MULTILINE)
# search to find undefined references or citations warnings
undefined_references_str = '(^LaTeX Warning:.*undefined references)|(^Package \w+ Warning:.*undefined citations)'
undefined_references_re = re.compile(undefined_references_str, re.MULTILINE)
# used by the emitter
auxfile_re = re.compile(r".", re.MULTILINE)
tableofcontents_re = re.compile(r"^[^%\n]*\\tableofcontents", re.MULTILINE)
makeindex_re = re.compile(r"^[^%\n]*\\makeindex", re.MULTILINE)
bibliography_re = re.compile(r"^[^%\n]*\\bibliography", re.MULTILINE)
bibunit_re = re.compile(r"^[^%\n]*\\begin\{bibunit\}", re.MULTILINE)
multibib_re = re.compile(r"^[^%\n]*\\newcites\{([^\}]*)\}", re.MULTILINE)
addbibresource_re = re.compile(r"^[^%\n]*\\(addbibresource|addglobalbib|addsectionbib)", re.MULTILINE)
listoffigures_re = re.compile(r"^[^%\n]*\\listoffigures", re.MULTILINE)
listoftables_re = re.compile(r"^[^%\n]*\\listoftables", re.MULTILINE)
hyperref_re = re.compile(r"^[^%\n]*\\usepackage.*\{hyperref\}", re.MULTILINE)
makenomenclature_re = re.compile(r"^[^%\n]*\\makenomenclature", re.MULTILINE)
makeglossary_re = re.compile(r"^[^%\n]*\\makeglossary", re.MULTILINE)
makeglossaries_re = re.compile(r"^[^%\n]*\\makeglossaries", re.MULTILINE)
makeacronyms_re = re.compile(r"^[^%\n]*\\makeglossaries", re.MULTILINE)
beamer_re = re.compile(r"^[^%\n]*\\documentclass\{beamer\}", re.MULTILINE)
regex = r'^[^%\n]*\\newglossary\s*\[([^\]]+)\]?\s*\{([^}]*)\}\s*\{([^}]*)\}\s*\{([^}]*)\}\s*\{([^}]*)\}'
newglossary_re = re.compile(regex, re.MULTILINE)
newglossary_suffix = []
# search to find all files included by Latex
include_re = re.compile(r'^[^%\n]*\\(?:include|input){([^}]*)}', re.MULTILINE)
includeOnly_re = re.compile(r'^[^%\n]*\\(?:include){([^}]*)}', re.MULTILINE)
# search to find all graphics files included by Latex
includegraphics_re = re.compile(r'^[^%\n]*\\(?:includegraphics(?:\[[^\]]+\])?){([^}]*)}', re.MULTILINE)
# search to find all files opened by Latex (recorded in .log file)
openout_re = re.compile(r"OUTPUT *(.*)")
# list of graphics file extensions for TeX and LaTeX
TexGraphics = SCons.Scanner.LaTeX.TexGraphics
LatexGraphics = SCons.Scanner.LaTeX.LatexGraphics
# An Action sufficient to build any generic tex file.
TeXAction = None
# An action to build a latex file. This action might be needed more
# than once if we are dealing with labels and bibtex.
LaTeXAction = None
# An action to run BibTeX on a file.
BibTeXAction = None
# An action to run Biber on a file.
BiberAction = None
# An action to run MakeIndex on a file.
MakeIndexAction = None
# An action to run MakeIndex (for nomencl) on a file.
MakeNclAction = None
# An action to run MakeIndex (for glossary) on a file.
MakeGlossaryAction = None
# An action to run MakeIndex (for acronyms) on a file.
MakeAcronymsAction = None
# An action to run MakeIndex (for newglossary commands) on a file.
MakeNewGlossaryAction = None
# Used as a return value of modify_env_var if the variable is not set.
_null = SCons.Scanner.LaTeX._null
modify_env_var = SCons.Scanner.LaTeX.modify_env_var
def check_file_error_message(utility, filename='log'):
msg = '%s returned an error, check the %s file\n' % (utility, filename)
sys.stdout.write(msg)
def FindFile(name,suffixes,paths,env,requireExt=False):
if requireExt:
name,ext = SCons.Util.splitext(name)
# if the user gave an extension use it.
if ext:
name = name + ext
if Verbose:
print " searching for '%s' with extensions: " % name,suffixes
for path in paths:
testName = os.path.join(path,name)
if Verbose:
print " look for '%s'" % testName
if os.path.isfile(testName):
if Verbose:
print " found '%s'" % testName
return env.fs.File(testName)
else:
name_ext = SCons.Util.splitext(testName)[1]
if name_ext:
continue
# if no suffix try adding those passed in
for suffix in suffixes:
testNameExt = testName + suffix
if Verbose:
print " look for '%s'" % testNameExt
if os.path.isfile(testNameExt):
if Verbose:
print " found '%s'" % testNameExt
return env.fs.File(testNameExt)
if Verbose:
print " did not find '%s'" % name
return None
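# Illustrative sketch (not part of the upstream tool): FindFile() first tries
# the name as given in each path, then retries with each suffix appended, so
#
#     FindFile('chap1', ['.tex', '.ltx'], ['.', 'chapters'], env)
#
# returns a Node for ./chap1.tex if that file exists, falls through to the
# chapters/ directory candidates next, and returns None when nothing matches.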
def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None):
"""A builder for LaTeX files that checks the output in the aux file
and decides how many times to use LaTeXAction, and BibTeXAction."""
global must_rerun_latex
# This routine is called with two actions. In this file for DVI builds
# with LaTeXAction and from the pdflatex.py with PDFLaTeXAction
# set this up now for the case where the user requests a different extension
# for the target filename
if (XXXLaTeXAction == LaTeXAction):
callerSuffix = ".dvi"
else:
callerSuffix = env['PDFSUFFIX']
basename = SCons.Util.splitext(str(source[0]))[0]
basedir = os.path.split(str(source[0]))[0]
basefile = os.path.split(str(basename))[1]
abspath = os.path.abspath(basedir)
targetext = os.path.splitext(str(target[0]))[1]
targetdir = os.path.split(str(target[0]))[0]
saved_env = {}
for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
saved_env[var] = modify_env_var(env, var, abspath)
# Create base file names with the target directory since the auxiliary files
# will be made there. That's because the *COM variables have the cd
# command in the prolog. We check
# for the existence of files before opening them--even ones like the
# aux file that TeX always creates--to make it possible to write tests
# with stubs that don't necessarily generate all of the same files.
targetbase = os.path.join(targetdir, basefile)
# if there is a \makeindex there will be a .idx and thus
# we have to run makeindex at least once to keep the build
# happy even if there is no index.
# Same for glossaries, nomenclature, and acronyms
src_content = source[0].get_text_contents()
run_makeindex = makeindex_re.search(src_content) and not os.path.isfile(targetbase + '.idx')
run_nomenclature = makenomenclature_re.search(src_content) and not os.path.isfile(targetbase + '.nlo')
run_glossary = makeglossary_re.search(src_content) and not os.path.isfile(targetbase + '.glo')
run_glossaries = makeglossaries_re.search(src_content) and not os.path.isfile(targetbase + '.glo')
run_acronyms = makeacronyms_re.search(src_content) and not os.path.isfile(targetbase + '.acn')
saved_hashes = {}
suffix_nodes = {}
for suffix in all_suffixes+sum(newglossary_suffix, []):
theNode = env.fs.File(targetbase + suffix)
suffix_nodes[suffix] = theNode
saved_hashes[suffix] = theNode.get_csig()
if Verbose:
print "hashes: ",saved_hashes
must_rerun_latex = True
# .aux files already processed by BibTex
already_bibtexed = []
#
# routine to update MD5 hash and compare
#
def check_MD5(filenode, suffix):
global must_rerun_latex
# two calls to clear old csig
filenode.clear_memoized_values()
filenode.ninfo = filenode.new_ninfo()
new_md5 = filenode.get_csig()
if saved_hashes[suffix] == new_md5:
if Verbose:
print "file %s not changed" % (targetbase+suffix)
return False # unchanged
saved_hashes[suffix] = new_md5
must_rerun_latex = True
if Verbose:
print "file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5
return True # changed
# generate the file name that latex will generate
resultfilename = targetbase + callerSuffix
count = 0
while (must_rerun_latex and count < int(env.subst('$LATEXRETRIES'))) :
result = XXXLaTeXAction(target, source, env)
if result != 0:
return result
count = count + 1
must_rerun_latex = False
# Decide if various things need to be run, or run again.
# Read the log file to find warnings/errors
logfilename = targetbase + '.log'
logContent = ''
if os.path.isfile(logfilename):
logContent = open(logfilename, "rb").read()
# Read the fls file to find all .aux files
flsfilename = targetbase + '.fls'
flsContent = ''
auxfiles = []
if os.path.isfile(flsfilename):
flsContent = open(flsfilename, "rb").read()
auxfiles = openout_aux_re.findall(flsContent)
# remove duplicates
dups = {}
for x in auxfiles:
dups[x] = 1
auxfiles = list(dups.keys())
bcffiles = []
if os.path.isfile(flsfilename):
flsContent = open(flsfilename, "rb").read()
bcffiles = openout_bcf_re.findall(flsContent)
# remove duplicates
dups = {}
for x in bcffiles:
dups[x] = 1
bcffiles = list(dups.keys())
if Verbose:
print "auxfiles ",auxfiles
print "bcffiles ",bcffiles
# Now decide if bibtex will need to be run.
# The information that bibtex reads from the .aux file is
# pass-independent. If we find (below) that the .bbl file is unchanged,
# then the last latex saw a correct bibliography.
# Therefore only do this once
# Go through all .aux files and remember the files already done.
for auxfilename in auxfiles:
if auxfilename not in already_bibtexed:
already_bibtexed.append(auxfilename)
target_aux = os.path.join(targetdir, auxfilename)
if os.path.isfile(target_aux):
content = open(target_aux, "rb").read()
if content.find("bibdata") != -1:
if Verbose:
print "Need to run bibtex on ",auxfilename
bibfile = env.fs.File(SCons.Util.splitext(target_aux)[0])
result = BibTeXAction(bibfile, bibfile, env)
if result != 0:
check_file_error_message(env['BIBTEX'], 'blg')
must_rerun_latex = True
# Now decide if biber will need to be run.
# When the backend for biblatex is biber (by choice or default) the
# citation information is put in the .bcf file.
# The information that biber reads from the .bcf file is
# pass-independent. If we find (below) that the .bbl file is unchanged,
# then the last latex saw a correct bibliography.
# Therefore only do this once
# Go through all .bcf files and remember the files already done.
for bcffilename in bcffiles:
if bcffilename not in already_bibtexed:
already_bibtexed.append(bcffilename)
target_bcf = os.path.join(targetdir, bcffilename)
if os.path.isfile(target_bcf):
content = open(target_bcf, "rb").read()
if content.find("bibdata") != -1:
if Verbose:
print "Need to run biber on ",bcffilename
bibfile = env.fs.File(SCons.Util.splitext(target_bcf)[0])
result = BiberAction(bibfile, bibfile, env)
if result != 0:
check_file_error_message(env['BIBER'], 'blg')
must_rerun_latex = True
# Now decide if latex will need to be run again due to index.
if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex):
# We must run makeindex
if Verbose:
print "Need to run makeindex"
idxfile = suffix_nodes['.idx']
result = MakeIndexAction(idxfile, idxfile, env)
if result != 0:
check_file_error_message(env['MAKEINDEX'], 'ilg')
return result
# TO-DO: need to add a way for the user to extend this list for whatever
# auxiliary files they create in other (or their own) packages
        # The harder case is where an action needs to be called -- that should be rare (I hope?)
for index in check_suffixes:
check_MD5(suffix_nodes[index],index)
# Now decide if latex will need to be run again due to nomenclature.
if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature):
# We must run makeindex
if Verbose:
print "Need to run makeindex for nomenclature"
nclfile = suffix_nodes['.nlo']
result = MakeNclAction(nclfile, nclfile, env)
if result != 0:
check_file_error_message('%s (nomenclature)' % env['MAKENCL'],
'nlg')
#return result
# Now decide if latex will need to be run again due to glossary.
if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossaries) or (count == 1 and run_glossary):
# We must run makeindex
if Verbose:
print "Need to run makeindex for glossary"
glofile = suffix_nodes['.glo']
result = MakeGlossaryAction(glofile, glofile, env)
if result != 0:
check_file_error_message('%s (glossary)' % env['MAKEGLOSSARY'],
'glg')
#return result
# Now decide if latex will need to be run again due to acronyms.
if check_MD5(suffix_nodes['.acn'],'.acn') or (count == 1 and run_acronyms):
# We must run makeindex
if Verbose:
print "Need to run makeindex for acronyms"
acrfile = suffix_nodes['.acn']
result = MakeAcronymsAction(acrfile, acrfile, env)
if result != 0:
check_file_error_message('%s (acronyms)' % env['MAKEACRONYMS'],
'alg')
return result
# Now decide if latex will need to be run again due to newglossary command.
for ig in range(len(newglossary_suffix)):
if check_MD5(suffix_nodes[newglossary_suffix[ig][2]],newglossary_suffix[ig][2]) or (count == 1):
# We must run makeindex
if Verbose:
print "Need to run makeindex for newglossary"
newglfile = suffix_nodes[newglossary_suffix[ig][2]]
MakeNewGlossaryAction = SCons.Action.Action("$MAKENEWGLOSSARY ${SOURCE.filebase}%s -s ${SOURCE.filebase}.ist -t ${SOURCE.filebase}%s -o ${SOURCE.filebase}%s" % (newglossary_suffix[ig][2],newglossary_suffix[ig][0],newglossary_suffix[ig][1]), "$MAKENEWGLOSSARYCOMSTR")
result = MakeNewGlossaryAction(newglfile, newglfile, env)
if result != 0:
check_file_error_message('%s (newglossary)' % env['MAKENEWGLOSSARY'],
newglossary_suffix[ig][0])
return result
# Now decide if latex needs to be run yet again to resolve warnings.
if warning_rerun_re.search(logContent):
must_rerun_latex = True
if Verbose:
print "rerun Latex due to latex or package rerun warning"
if rerun_citations_re.search(logContent):
must_rerun_latex = True
if Verbose:
print "rerun Latex due to 'Rerun to get citations correct' warning"
if undefined_references_re.search(logContent):
must_rerun_latex = True
if Verbose:
print "rerun Latex due to undefined references or citations"
if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex):
print "reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES'))
# end of while loop
# rename Latex's output to what the target name is
if not (str(target[0]) == resultfilename and os.path.isfile(resultfilename)):
if os.path.isfile(resultfilename):
print "move %s to %s" % (resultfilename, str(target[0]), )
shutil.move(resultfilename,str(target[0]))
# Original comment (when TEXPICTS was not restored):
    # The TEXPICTS environment variable is needed by a dvi -> pdf step
# later on Mac OSX so leave it
#
# It is also used when searching for pictures (implicit dependencies).
# Why not set the variable again in the respective builder instead
# of leaving local modifications in the environment? What if multiple
# latex builds in different directories need different TEXPICTS?
for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
if var == 'TEXPICTS':
continue
if saved_env[var] is _null:
try:
del env['ENV'][var]
except KeyError:
pass # was never set
else:
env['ENV'][var] = saved_env[var]
return result
def LaTeXAuxAction(target = None, source= None, env=None):
result = InternalLaTeXAuxAction( LaTeXAction, target, source, env )
return result
LaTeX_re = re.compile("\\\\document(style|class)")
def is_LaTeX(flist,env,abspath):
"""Scan a file list to decide if it's TeX- or LaTeX-flavored."""
# We need to scan files that are included in case the
# \documentclass command is in them.
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
    if not SCons.Util.is_List(paths):
        # TEXINPUTS may arrive as a single os.pathsep-separated string;
        # split it into a list of directories to search.
        paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print "is_LaTeX search path ",paths
print "files to search :",flist
# Now that we have the search path and file list, check each one
for f in flist:
if Verbose:
print " checking for Latex source ",str(f)
content = f.get_text_contents()
if LaTeX_re.search(content):
if Verbose:
print "file %s is a LaTeX file" % str(f)
return 1
if Verbose:
print "file %s is not a LaTeX file" % str(f)
# now find included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print "files included by '%s': "%str(f),inc_files
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
# search the included files
for src in inc_files:
srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
# make this a list since is_LaTeX takes a list.
fileList = [srcNode,]
if Verbose:
print "FindFile found ",srcNode
if srcNode is not None:
file_test = is_LaTeX(fileList, env, abspath)
# return on first file that finds latex is needed.
if file_test:
return file_test
if Verbose:
print " done scanning ",str(f)
return 0
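# Illustrative note (not part of the upstream tool): is_LaTeX() returns 1 as
# soon as any scanned source (or a transitively \input/\include-d file)
# matches \documentclass or \documentstyle, e.g. a file containing
#
#     \documentclass{article}
#
# is built with LaTeXAuxAction, while plain-TeX sources without either
# command fall through to TeXAction in TeXLaTeXFunction below.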
def TeXLaTeXFunction(target = None, source= None, env=None):
"""A builder for TeX and LaTeX that scans the source file to
decide the "flavor" of the source and then executes the appropriate
program."""
# find these paths for use in is_LaTeX to search for included files
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
if is_LaTeX(source,env,abspath):
result = LaTeXAuxAction(target,source,env)
if result != 0:
check_file_error_message(env['LATEX'])
else:
result = TeXAction(target,source,env)
if result != 0:
check_file_error_message(env['TEX'])
return result
def TeXLaTeXStrFunction(target = None, source= None, env=None):
"""A strfunction for TeX and LaTeX that scans the source file to
decide the "flavor" of the source and then returns the appropriate
command string."""
if env.GetOption("no_exec"):
# find these paths for use in is_LaTeX to search for included files
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
if is_LaTeX(source,env,abspath):
result = env.subst('$LATEXCOM',0,target,source)+" ..."
else:
result = env.subst("$TEXCOM",0,target,source)+" ..."
else:
result = ''
return result
def tex_eps_emitter(target, source, env):
"""An emitter for TeX and LaTeX sources when
executing tex or latex. It will accept .ps and .eps
graphics files
"""
(target, source) = tex_emitter_core(target, source, env, TexGraphics)
return (target, source)
def tex_pdf_emitter(target, source, env):
"""An emitter for TeX and LaTeX sources when
executing pdftex or pdflatex. It will accept graphics
files of types .pdf, .jpg, .png, .gif, and .tif
"""
(target, source) = tex_emitter_core(target, source, env, LatexGraphics)
return (target, source)
def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files):
""" For theFile (a Node) update any file_tests and search for graphics files
then find all included files and call ScanFiles recursively for each of them"""
content = theFile.get_text_contents()
if Verbose:
print " scanning ",str(theFile)
for i in range(len(file_tests_search)):
if file_tests[i][0] is None:
if Verbose:
print "scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1]
file_tests[i][0] = file_tests_search[i].search(content)
if Verbose and file_tests[i][0]:
print " found match for ",file_tests[i][1][-1]
# for newglossary insert the suffixes in file_tests[i]
if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary':
findresult = file_tests_search[i].findall(content)
for l in range(len(findresult)) :
(file_tests[i][1]).insert(0,'.'+findresult[l][3])
(file_tests[i][1]).insert(0,'.'+findresult[l][2])
(file_tests[i][1]).insert(0,'.'+findresult[l][0])
suffix_list = ['.'+findresult[l][0],'.'+findresult[l][2],'.'+findresult[l][3] ]
newglossary_suffix.append(suffix_list)
if Verbose:
print " new suffixes for newglossary ",newglossary_suffix
incResult = includeOnly_re.search(content)
if incResult:
aux_files.append(os.path.join(targetdir, incResult.group(1)))
if Verbose:
print "\include file names : ", aux_files
# recursively call this on each of the included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print "files included by '%s': "%str(theFile),inc_files
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
for src in inc_files:
srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
if srcNode is not None:
file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
if Verbose:
print " done scanning ",str(theFile)
return file_tests
def tex_emitter_core(target, source, env, graphics_extensions):
"""An emitter for TeX and LaTeX sources.
For LaTeX sources we try and find the common created files that
are needed on subsequent runs of latex to finish tables of contents,
bibliographies, indices, lists of figures, and hyperlink references.
"""
basename = SCons.Util.splitext(str(source[0]))[0]
basefile = os.path.split(str(basename))[1]
targetdir = os.path.split(str(target[0]))[0]
targetbase = os.path.join(targetdir, basefile)
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
target[0].attributes.path = abspath
#
# file names we will make use of in searching the sources and log file
#
emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg', '.alg'] + all_suffixes
auxfilename = targetbase + '.aux'
logfilename = targetbase + '.log'
flsfilename = targetbase + '.fls'
env.SideEffect(auxfilename,target[0])
env.SideEffect(logfilename,target[0])
env.SideEffect(flsfilename,target[0])
if Verbose:
print "side effect :",auxfilename,logfilename,flsfilename
env.Clean(target[0],auxfilename)
env.Clean(target[0],logfilename)
env.Clean(target[0],flsfilename)
content = source[0].get_text_contents()
# These variables are no longer used.
#idx_exists = os.path.isfile(targetbase + '.idx')
#nlo_exists = os.path.isfile(targetbase + '.nlo')
#glo_exists = os.path.isfile(targetbase + '.glo')
#acr_exists = os.path.isfile(targetbase + '.acn')
# set up list with the regular expressions
# we use to find features used
file_tests_search = [auxfile_re,
makeindex_re,
bibliography_re,
bibunit_re,
multibib_re,
addbibresource_re,
tableofcontents_re,
listoffigures_re,
listoftables_re,
hyperref_re,
makenomenclature_re,
makeglossary_re,
makeglossaries_re,
makeacronyms_re,
beamer_re,
newglossary_re ]
# set up list with the file suffixes that need emitting
# when a feature is found
file_tests_suff = [['.aux','aux_file'],
['.idx', '.ind', '.ilg','makeindex'],
['.bbl', '.blg','bibliography'],
['.bbl', '.blg','bibunit'],
['.bbl', '.blg','multibib'],
['.bbl', '.blg','.bcf','addbibresource'],
['.toc','contents'],
['.lof','figures'],
['.lot','tables'],
['.out','hyperref'],
['.nlo', '.nls', '.nlg','nomenclature'],
['.glo', '.gls', '.glg','glossary'],
['.glo', '.gls', '.glg','glossaries'],
['.acn', '.acr', '.alg','acronyms'],
['.nav', '.snm', '.out', '.toc','beamer'],
['newglossary',] ]
# for newglossary the suffixes are added as we find the command
# build the list of lists
file_tests = []
for i in range(len(file_tests_search)):
file_tests.append( [None, file_tests_suff[i]] )
# TO-DO: need to add a way for the user to extend this list for whatever
# auxiliary files they create in other (or their own) packages
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
        # split the string on os.pathsep to get the list of search directories
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print "search path ",paths
# scan all sources for side effect files
aux_files = []
file_tests = ScanFiles(source[0], target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
for (theSearch,suffix_list) in file_tests:
        # add side effects if the feature is present. If the file is to be generated, add all side effects
if Verbose and theSearch:
print "check side effects for ",suffix_list[-1]
if (theSearch != None) or (not source[0].exists() ):
file_list = [targetbase,]
# for bibunit we need a list of files
if suffix_list[-1] == 'bibunit':
file_basename = os.path.join(targetdir, 'bu*.aux')
file_list = glob.glob(file_basename)
# remove the suffix '.aux'
for i in range(len(file_list)):
file_list.append(SCons.Util.splitext(file_list[i])[0])
# for multibib we need a list of files
if suffix_list[-1] == 'multibib':
for multibibmatch in multibib_re.finditer(content):
if Verbose:
print "multibib match ",multibibmatch.group(1)
if multibibmatch != None:
baselist = multibibmatch.group(1).split(',')
if Verbose:
print "multibib list ", baselist
for i in range(len(baselist)):
file_list.append(os.path.join(targetdir, baselist[i]))
# now define the side effects
for file_name in file_list:
for suffix in suffix_list[:-1]:
env.SideEffect(file_name + suffix,target[0])
if Verbose:
print "side effect tst :",file_name + suffix, " target is ",str(target[0])
env.Clean(target[0],file_name + suffix)
for aFile in aux_files:
aFile_base = SCons.Util.splitext(aFile)[0]
env.SideEffect(aFile_base + '.aux',target[0])
if Verbose:
print "side effect aux :",aFile_base + '.aux'
env.Clean(target[0],aFile_base + '.aux')
# read fls file to get all other files that latex creates and will read on the next pass
# remove files from list that we explicitly dealt with above
if os.path.isfile(flsfilename):
content = open(flsfilename, "rb").read()
out_files = openout_re.findall(content)
myfiles = [auxfilename, logfilename, flsfilename, targetbase+'.dvi',targetbase+'.pdf']
for filename in out_files[:]:
if filename in myfiles:
out_files.remove(filename)
env.SideEffect(out_files,target[0])
if Verbose:
print "side effect fls :",out_files
env.Clean(target[0],out_files)
return (target, source)
TeXLaTeXAction = None
def generate(env):
"""Add Builders and construction variables for TeX to an Environment."""
global TeXLaTeXAction
if TeXLaTeXAction is None:
TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction,
strfunction=TeXLaTeXStrFunction)
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
generate_common(env)
import dvi
dvi.generate(env)
bld = env['BUILDERS']['DVI']
bld.add_action('.tex', TeXLaTeXAction)
bld.add_emitter('.tex', tex_eps_emitter)
def generate_darwin(env):
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
if (platform.system() == 'Darwin'):
try:
ospath = env['ENV']['PATHOSX']
        except KeyError:
ospath = None
if ospath:
env.AppendENVPath('PATH', ospath)
def generate_common(env):
"""Add internal Builders and construction variables for LaTeX to an Environment."""
# Add OSX system paths so TeX tools can be found
# when a list of tools is given the exists() method is not called
generate_darwin(env)
# A generic tex file Action, sufficient for all tex files.
global TeXAction
if TeXAction is None:
TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
# An Action to build a latex file. This might be needed more
# than once if we are dealing with labels and bibtex.
global LaTeXAction
if LaTeXAction is None:
LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
# Define an action to run BibTeX on a file.
global BibTeXAction
if BibTeXAction is None:
BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
# Define an action to run Biber on a file.
global BiberAction
if BiberAction is None:
BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR")
# Define an action to run MakeIndex on a file.
global MakeIndexAction
if MakeIndexAction is None:
MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
# Define an action to run MakeIndex on a file for nomenclatures.
global MakeNclAction
if MakeNclAction is None:
MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
# Define an action to run MakeIndex on a file for glossaries.
global MakeGlossaryAction
if MakeGlossaryAction is None:
MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
# Define an action to run MakeIndex on a file for acronyms.
global MakeAcronymsAction
if MakeAcronymsAction is None:
MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR")
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Some Linux platforms have pdflatex set up in a way
# that requires that the HOME environment variable be set.
# Add it here if defined.
v = os.environ.get('HOME')
if v:
environ['HOME'] = v
CDCOM = 'cd '
if platform.system() == 'Windows':
# allow cd command to change drives on Windows
CDCOM = 'cd /D '
env['TEX'] = 'tex'
env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
env['PDFTEX'] = 'pdftex'
env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'
env['LATEX'] = 'latex'
env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
env['LATEXRETRIES'] = 4
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
env['BIBTEX'] = 'bibtex'
env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
env['BIBER'] = 'biber'
env['BIBERFLAGS'] = SCons.Util.CLVar('')
env['BIBERCOM'] = CDCOM + '${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}'
env['MAKEINDEX'] = 'makeindex'
env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
env['MAKEGLOSSARY'] = 'makeindex'
env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
env['MAKEACRONYMS'] = 'makeindex'
env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg')
env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'
env['MAKENCL'] = 'makeindex'
env['MAKENCLSTYLE'] = 'nomencl.ist'
env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
env['MAKENEWGLOSSARY'] = 'makeindex'
env['MAKENEWGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKENEWGLOSSARY '
def exists(env):
generate_darwin(env)
return env.Detect('tex')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
mustafat/odoo-1 | addons/base_iban/__openerp__.py | 260 | 1724 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'IBAN Bank Accounts',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
This module installs the base for IBAN (International Bank Account Number) bank accounts and checks for its validity.
======================================================================================================================
It also provides the ability to extract the correctly represented local accounts
from IBAN accounts with a single statement.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base'],
'data': ['base_iban_data.xml' , 'base_iban_view.xml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
toothris/toothris | src/bprofile.py | 1 | 5189 | # Copyright 2008, 2015 Oleg Plakhotniuk
#
# This file is part of Toothris.
#
# Toothris is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Toothris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Toothris. If not, see <http://www.gnu.org/licenses/>.
# LIBS
import pygame
# CONSTS
MEASURES_MIN_COUNT = 200
class Profiler :
def __init__ ( self, name ) :
self.name = name
self.time_min = 999999
self.time_max = 0
self.time_total = 0
self.measures = - MEASURES_MIN_COUNT
self.clock = pygame.time.Clock ()
self.measuring = False
self.profilers = {}
def begin ( self ) :
if self.measuring :
raise RuntimeError ( "trying to start already started profiler" )
self.clock.tick ()
self.measuring = True
def end ( self ) :
if not self.measuring :
raise RuntimeError ( "trying to stop not started profiler" )
self.clock.tick ()
self.measuring = False
self.measures += 1
if self.measures > 0 :
self.time_total += self.clock.get_time ()
self.time_min = min ( self.time_min, self.clock.get_time () )
self.time_max = max ( self.time_max, self.clock.get_time () )
def time_avg ( self ) :
return float ( self.time_total ) / max ( self.measures, 1 )
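# Illustrative direct use of the class above (hypothetical snippet; most
# callers should use the module-level begin()/end() helpers defined below,
# and note the first MEASURES_MIN_COUNT measurements are treated as warm-up):
#   p = Profiler ( "render" )
#   p.begin ()
#   # ... work to measure ...
#   p.end ()
#   print p.time_avg ()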
root_profilers = {}
stack_profilers = []
def begin ( name ) :
    global stack_profilers
global root_profilers
if not isinstance ( name, type ( "" ) ) :
raise RuntimeError ( "string name expected" )
if name == "" :
raise RuntimeError ( "name must not be empty" )
if len ( stack_profilers ) > 0 :
profilers = stack_profilers [ len ( stack_profilers ) - 1 ].profilers
else :
profilers = root_profilers
if name in profilers :
profiler = profilers [ name ]
else :
profiler = Profiler ( name )
profilers [ name ] = profiler
profiler.begin ()
stack_profilers.append ( profiler )
def end ( name ) :
global stack_profilers
if not isinstance ( name, type ( "" ) ) :
raise RuntimeError ( "string name expected" )
if len ( stack_profilers ) == 0 :
raise RuntimeError ( "no profiler currently running" )
if name == "" :
raise RuntimeError ( "name must not be empty" )
last_profiler = stack_profilers [ len ( stack_profilers ) - 1 ]
if name != last_profiler.name :
raise RuntimeError ( "trying to stop profiler " + name + \
" before profiler " + last_profiler.name )
stack_profilers.pop ().end ()
def stats_profilers ( profilers, indent = 0 ) :
if len ( profilers ) == 0 :
return
def padded_str ( value, max_len = 0, left_padding = True ) :
if isinstance ( value, type ( "" ) ) :
str_value = value
elif isinstance ( value, type ( 0 ) ) :
str_value = str ( value )
elif isinstance ( value, type ( 0.0 ) ) :
str_value = "%(number).2f" % { "number" : value }
spaces = max ( 0, max_len - len ( str_value ) )
if left_padding :
return " " * spaces + str_value
else :
return str_value + " " * spaces
longest_name = max ( [ len ( padded_str ( p.name ) ) for p in profilers.values () ] )
longest_min = max ( [ len ( padded_str ( p.time_min ) ) for p in profilers.values () ] )
longest_max = max ( [ len ( padded_str ( p.time_max ) ) for p in profilers.values () ] )
longest_avg = max ( [ len ( padded_str ( p.time_avg() ) ) for p in profilers.values () ] )
longest_msr = max ( [ len ( padded_str ( p.measures ) ) for p in profilers.values () ] )
names = profilers.keys ()
names.sort ()
for name in names :
profiler = profilers [ name ]
if profiler.measures > 0 :
print " " * 4 * indent + padded_str ( profiler.name , longest_name, False ) + \
" : min = " + padded_str ( profiler.time_min , longest_min ) + \
" max = " + padded_str ( profiler.time_max , longest_max ) + \
" avg = " + padded_str ( profiler.time_avg(), longest_avg ) + \
" frames = " + padded_str ( profiler.measures , longest_msr )
else :
print " " * 4 * indent + padded_str ( profiler.name , longest_name, False ) + \
" : not enough frames to profile ( " + str ( -profiler.measures ) + " left )"
stats_profilers ( profiler.profilers, indent + 1 )
def stats () :
print "profilers stats:"
stats_profilers ( root_profilers )
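# Hypothetical end-to-end usage of this module inside a game loop (names as
# defined above; stats are only reported for profilers that accumulated more
# than MEASURES_MIN_COUNT warm-up frames):
#   import bprofile
#   for frame in xrange ( 1000 ) :
#       bprofile.begin ( "frame" )
#       bprofile.begin ( "physics" )
#       # ... physics update ...
#       bprofile.end ( "physics" )
#       bprofile.end ( "frame" )
#   bprofile.stats ()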
| gpl-3.0 |
PatrikKT/android_kernel_htc_a11ul | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
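# Usage sketch: this script needs the perf python bindings built from the
# same kernel tree and importable (the exact build step varies by tree; one
# common form is `make -C tools/perf python/perf.so`), for example:
#   $ PYTHONPATH=tools/perf/python python twatch.py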
| gpl-2.0 |
hilaskis/UAV_MissionPlanner | Lib/site-packages/numpy/f2py/common_rules.py | 51 | 4709 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
"""
Build common block mechanism for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 10:57:33 $
Pearu Peterson
"""
__version__ = "$Revision: 1.19 $"[10:-1]
import __version__
f2py_version = __version__.version
import pprint
import sys
errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
from auxfuncs import *
import capi_maps
import func2subr
from crackfortran import rmbadname
##############
def findcommonblocks(block,top=1):
ret = []
if hascommon(block):
for n in block['common'].keys():
vars={}
for v in block['common'][n]:
vars[v]=block['vars'][v]
ret.append((n,block['common'][n],vars))
elif hasbody(block):
for b in block['body']:
ret=ret+findcommonblocks(b,0)
if top:
tret=[]
names=[]
for t in ret:
if t[0] not in names:
names.append(t[0])
tret.append(t)
return tret
return ret
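# Shape sketch of what findcommonblocks() returns (hypothetical Fortran
# input `common /cb/ x, y`; the per-variable dictionaries come from
# crackfortran and are abbreviated here):
#   [('cb', ['x', 'y'], {'x': {'typespec': 'real', ...}, 'y': {...}})]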
def buildhooks(m):
ret = {'commonhooks':[],'initcommonhooks':[],'docs':['"COMMON blocks:\\n"']}
fwrap = ['']
def fadd(line,s=fwrap): s[0] = '%s\n %s'%(s[0],line)
chooks = ['']
def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line)
ihooks = ['']
def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line)
doc = ['']
def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line)
for (name,vnames,vars) in findcommonblocks(m):
lower_name = name.lower()
hnames,inames = [],[]
for n in vnames:
if isintent_hide(vars[n]): hnames.append(n)
else: inames.append(n)
if hnames:
outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n'%(name,','.join(inames),','.join(hnames)))
else:
outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n'%(name,','.join(inames)))
fadd('subroutine f2pyinit%s(setupfunc)'%name)
fadd('external setupfunc')
for n in vnames:
fadd(func2subr.var2fixfortran(vars,n))
if name=='_BLNK_':
fadd('common %s'%(','.join(vnames)))
else:
fadd('common /%s/ %s'%(name,','.join(vnames)))
fadd('call setupfunc(%s)'%(','.join(inames)))
fadd('end\n')
cadd('static FortranDataDef f2py_%s_def[] = {'%(name))
idims=[]
for n in inames:
ct = capi_maps.getctype(vars[n])
at = capi_maps.c2capi_map[ct]
dm = capi_maps.getarrdims(n,vars[n])
if dm['dims']: idims.append('(%s)'%(dm['dims']))
else: idims.append('')
dms=dm['dims'].strip()
if not dms: dms='-1'
cadd('\t{\"%s\",%s,{{%s}},%s},'%(n,dm['rank'],dms,at))
cadd('\t{NULL}\n};')
inames1 = rmbadname(inames)
inames1_tps = ','.join(map(lambda s:'char *'+s,inames1))
cadd('static void f2py_setup_%s(%s) {'%(name,inames1_tps))
cadd('\tint i_f2py=0;')
for n in inames1:
cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name,n))
cadd('}')
if '_' in lower_name:
F_FUNC='F_FUNC_US'
else:
F_FUNC='F_FUNC'
cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\
%(F_FUNC,lower_name,name.upper(),
','.join(['char*']*len(inames1))))
cadd('static void f2py_init_%s(void) {'%name)
cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\
%(F_FUNC,lower_name,name.upper(),name))
cadd('}\n')
iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name,name,name))
tname = name.replace('_','\\_')
dadd('\\subsection{Common block \\texttt{%s}}\n'%(tname))
dadd('\\begin{description}')
for n in inames:
dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,vars[n])))
if hasnote(vars[n]):
note = vars[n]['note']
if type(note) is type([]): note='\n'.join(note)
dadd('--- %s'%(note))
dadd('\\end{description}')
ret['docs'].append('"\t/%s/ %s\\n"'%(name,','.join(map(lambda v,d:v+d,inames,idims))))
ret['commonhooks']=chooks
ret['initcommonhooks']=ihooks
ret['latexdoc']=doc[0]
if len(ret['docs'])<=1: ret['docs']=''
return ret,fwrap[0]
| gpl-2.0 |
PetrDlouhy/django | django/contrib/gis/db/models/query.py | 56 | 36394 | import warnings
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import (
GeometryField, LineStringField, PointField, get_srid_info,
)
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.sql import (
AreaField, DistanceField, GeomField, GMLField,
)
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.db import connections
from django.db.models.expressions import RawSQL
from django.db.models.fields import Field
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
# ### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
# Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup(
'area', field_name=kwargs.get('field_name', None))
s = {'procedure_args': procedure_args,
'geo_field': geo_field,
'setup': False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
warnings.warn(
"The collect GeoQuerySet method is deprecated. Use the Collect() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
warnings.warn(
"The extent GeoQuerySet method is deprecated. Use the Extent() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
warnings.warn(
"The extent3d GeoQuerySet method is deprecated. Use the Extent3D() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
s = {'desc': 'GeoJSON',
'procedure_args': {'precision': precision, 'options': options},
'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
        The `precision` keyword may be used to customize the number of
        _characters_ used in the output GeoHash; the default is 20.
"""
s = {'desc': 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc': 'GML', 'procedure_args': {'precision': precision}}
if backend.postgis:
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
s['procedure_args'] = {'precision': precision, 'version': version}
if backend.oracle:
s['select_field'] = GMLField()
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc': 'KML',
'procedure_fmt': '%(geo_col)s,%(precision)s',
'procedure_args': {'precision': kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
        Creates a linestring from all of the PointField geometries in
        this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
warnings.warn(
"The make_line GeoQuerySet method is deprecated. Use the MakeLine() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
        sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field': GeomField()}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
raise TypeError('Size argument(s) for the grid must be a float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size': size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize,
'xorigin': xorigin, 'yorigin': yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt': procedure_fmt,
'procedure_args': procedure_args,
'select_field': GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {
'desc': 'SVG',
'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args': {
'rel': relative,
'precision': precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name', None)
self._spatial_setup('transform', field_name=field_name)
self.query.add_context('transformed_srid', srid)
return self._clone()
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
warnings.warn(
"The unionagg GeoQuerySet method is deprecated. Use the Union() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Union, **kwargs)
# ### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None:
desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function': func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle:
agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
          Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(
att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type', None))
for k, v in six.iteritems(default_args):
settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types):
model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, None, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field', False):
select_field = settings['select_field']
if connection.ops.oracle:
select_field.empty_strings_allowed = False
else:
select_field = Field()
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
self.query.add_annotation(
RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field),
model_att)
return self
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name', None))
# If geodetic defaulting distance attribute to meters (Oracle and
# PostGIS spherical distances return meters). Otherwise, use the
# units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
srid = self.query.get_context('transformed_srid')
if srid:
u, unit_name, s = get_srid_info(srid, connection)
geodetic = unit_name.lower() in geo_field.geodetic_units
if geodetic and not connection.features.supports_distance_geodetic:
raise ValueError(
'This database does not support linear distance '
'calculations on geodetic coordinate systems.'
)
if distance:
if srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, srid)
if geom.srid is None or geom.srid == srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += (', %s(%s(%%%%s, %s), %s)' % (
backend.transform, backend.from_text,
geom.srid, srid))
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
                # procedures may only do queries from point columns to point
                # geometries, so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(six.memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError(
'Spherical distance calculation only supported with '
'Point Geometry parameters'
)
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]})
else:
procedure_args.update({'function': backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]})
elif geom_3d and connection.features.supports_3d_functions:
# Use 3D variants of perimeter and length routines on supported backends.
if perimeter:
procedure_args.update({'function': backend.perimeter3d})
elif length:
procedure_args.update({'function': backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field': DistanceField(dist_att),
'setup': False,
'geo_field': geo_field,
'procedure_args': procedure_args,
'procedure_fmt': procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field': GeomField()}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance': tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {
'geom_args': ('geom',),
'select_field': GeomField(),
'procedure_fmt': '%(geo_col)s,%(geom)s',
'procedure_args': {'geom': geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
compiler = self.query.get_compiler(self.db)
opts = self.model._meta
if geo_field not in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
# Note: the operation really is defined as "must add select related!"
self.query.add_select_related([field_name])
# Call pre_sql_setup() so that compiler.select gets populated.
compiler.pre_sql_setup()
for col, _, _ in compiler.select:
if col.output_field == geo_field:
return col.as_sql(compiler, compiler.connection)[0]
raise ValueError("%r not in compiler's related_select_cols" % geo_field)
elif geo_field not in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
parent_model = geo_field.model._meta.concrete_model
return self._field_column(compiler, geo_field, parent_model._meta.db_table)
else:
return self._field_column(compiler, geo_field)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered or the one specified via
the `field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuerySet's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
            # Iterate until the first geographic field is found.
for field in self.model._meta.fields:
if isinstance(field, GeometryField):
return field
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GISLookup._check_geo_field(self.model._meta, field_name)
def _field_column(self, compiler, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuerySet` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None:
table_alias = compiler.query.get_meta().db_table
return "%s.%s" % (compiler.quote_name_unless_alias(table_alias),
compiler.connection.ops.quote_name(column or field.column))
| bsd-3-clause |
hslee16/ansible-modules-extras | cloud/lxc/lxc_container.py | 9 | 56696 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: lxc_container
short_description: Manage LXC Containers
version_added: 1.8.0
description:
- Management of LXC containers
author: "Kevin Carter (@cloudnull)"
options:
name:
description:
- Name of a container.
required: true
backing_store:
choices:
- dir
- lvm
- loop
- btrfs
- overlayfs
- zfs
description:
- Backend storage type for the container.
required: false
default: dir
template:
description:
- Name of the template to use within an LXC create.
required: false
default: ubuntu
template_options:
description:
- Template options when building the container.
required: false
config:
description:
- Path to the LXC configuration file.
required: false
default: null
lv_name:
description:
- Name of the logical volume, defaults to the container name.
default: $CONTAINER_NAME
required: false
vg_name:
description:
- If Backend store is lvm, specify the name of the volume group.
default: lxc
required: false
thinpool:
description:
- Use LVM thin pool called TP.
required: false
fs_type:
description:
- Create fstype TYPE.
default: ext4
required: false
fs_size:
description:
- File system Size.
default: 5G
required: false
directory:
description:
- Place rootfs directory under DIR.
required: false
zfs_root:
description:
- Create zfs under given zfsroot.
required: false
container_command:
description:
- Run a command within a container.
required: false
lxc_path:
description:
- Place container under PATH
required: false
container_log:
choices:
- true
- false
description:
- Enable a container log for host actions to the container.
default: false
container_log_level:
choices:
- INFO
- ERROR
- DEBUG
description:
- Set the log level for a container where *container_log* was set.
required: false
default: INFO
clone_name:
version_added: "2.0"
description:
- Name of the new cloned server. This is only used when state is
clone.
required: false
default: false
clone_snapshot:
version_added: "2.0"
required: false
choices:
- true
- false
description:
      - Create a snapshot of a container when cloning. This is not supported
by all container storage backends. Enabling this may fail if the
backing store does not support snapshots.
default: false
archive:
choices:
- true
- false
description:
- Create an archive of a container. This will create a tarball of the
running container.
default: false
archive_path:
description:
- Path the save the archived container. If the path does not exist
the archive method will attempt to create it.
default: null
archive_compression:
choices:
- gzip
- bzip2
- none
description:
- Type of compression to use when creating an archive of a running
container.
default: gzip
state:
choices:
- started
- stopped
- restarted
- absent
- frozen
description:
- Define the state of a container. If you clone a container using
        `clone_name` the newly cloned container is created in a stopped state.
The running container will be stopped while the clone operation is
happening and upon completion of the clone the original container
state will be restored.
required: false
default: started
container_config:
description:
      - List of 'key=value' options to use when configuring a container.
required: false
requirements:
- 'lxc >= 1.0 # OS package'
- 'python >= 2.6 # OS Package'
- 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
notes:
- Containers must have a unique name. If you attempt to create a container
with a name that already exists in the user's namespace the module will
simply return as "unchanged".
- The "container_command" can be used with any state except "absent". If
used with state "stopped" the container will be "started", the command
executed, and then the container "stopped" again. Likewise if the state
is "stopped" and the container does not exist it will be first created,
"started", the command executed, and then "stopped". If you use a "|"
in the variable you can use common script formatting within the variable
itself. The "container_command" option will always execute as BASH.
When using "container_command" a log file is created in the /tmp/ directory
which contains both stdout and stderr of any command executed.
- If "archive" is **true** the system will attempt to create a compressed
tarball of the running container. The "archive" option supports LVM backed
containers and will create a snapshot of the running container when
creating the archive.
- If your distro does not have a package for "python2-lxc", which is a
requirement for this module, it can be installed from source at
"https://github.com/lxc/python2-lxc" or installed via pip using the package
name lxc-python2.
"""
EXAMPLES = """
- name: Create a started container
lxc_container:
name: test-container-started
container_log: true
template: ubuntu
state: started
template_options: --release trusty
- name: Create a stopped container
lxc_container:
name: test-container-stopped
container_log: true
template: ubuntu
state: stopped
template_options: --release trusty
- name: Create a frozen container
lxc_container:
name: test-container-frozen
container_log: true
template: ubuntu
state: frozen
template_options: --release trusty
container_command: |
echo 'hello world.' | tee /opt/started-frozen
# Create a filesystem container, configure it, archive it, and start it.
- name: Create filesystem container
lxc_container:
name: test-container-config
backing_store: dir
container_log: true
template: ubuntu
state: started
archive: true
archive_compression: none
container_config:
- "lxc.aa_profile=unconfined"
- "lxc.cgroup.devices.allow=a *:* rmw"
template_options: --release trusty
# Create an lvm container, run a complex command in it, add additional
# configuration to it, create an archive of it, and finally leave the container
# in a frozen state. The container archive will be compressed using bzip2
- name: Create a frozen lvm container
lxc_container:
name: test-container-lvm
container_log: true
template: ubuntu
state: frozen
backing_store: lvm
template_options: --release trusty
container_command: |
apt-get update
apt-get install -y vim lxc-dev
echo 'hello world.' | tee /opt/started
if [[ -f "/opt/started" ]]; then
echo 'hello world.' | tee /opt/found-started
fi
container_config:
- "lxc.aa_profile=unconfined"
- "lxc.cgroup.devices.allow=a *:* rmw"
archive: true
archive_compression: bzip2
register: lvm_container_info
- name: Debug info on container "test-container-lvm"
debug: var=lvm_container_info
- name: Run a command in a container and ensure it's in a "stopped" state.
lxc_container:
name: test-container-started
state: stopped
container_command: |
echo 'hello world.' | tee /opt/stopped
- name: Run a command in a container and ensure it's in a "frozen" state.
lxc_container:
name: test-container-stopped
state: frozen
container_command: |
echo 'hello world.' | tee /opt/frozen
- name: Start a container
lxc_container:
name: test-container-stopped
state: started
- name: Run a command in a container and then restart it
lxc_container:
name: test-container-started
state: restarted
container_command: |
echo 'hello world.' | tee /opt/restarted
- name: Run a complex command within a "running" container
lxc_container:
name: test-container-started
container_command: |
apt-get update
apt-get install -y curl wget vim apache2
echo 'hello world.' | tee /opt/started
if [[ -f "/opt/started" ]]; then
echo 'hello world.' | tee /opt/found-started
fi
# Create an archive of an existing container, save the archive to a defined
# path and then destroy it.
- name: Archive container
lxc_container:
name: test-container-started
state: absent
archive: true
archive_path: /opt/archives
# Create a container using overlayfs, create an archive of it, create a
# snapshot clone of the container and finally leave the container
# in a frozen state. The container archive will be compressed using gzip.
- name: Create an overlayfs container archive and clone it
lxc_container:
name: test-container-overlayfs
container_log: true
template: ubuntu
state: started
backing_store: overlayfs
template_options: --release trusty
clone_snapshot: true
clone_name: test-container-overlayfs-clone-snapshot
archive: true
archive_compression: gzip
register: clone_container_info
- name: debug info on container "test-container"
debug: var=clone_container_info
- name: Clone a container using snapshot
lxc_container:
name: test-container-overlayfs-clone-snapshot
backing_store: overlayfs
clone_name: test-container-overlayfs-clone-snapshot2
clone_snapshot: true
- name: Create a new container and clone it
lxc_container:
name: test-container-new-archive
backing_store: dir
clone_name: test-container-new-archive-clone
- name: Archive and clone a container then destroy it
lxc_container:
name: test-container-new-archive
state: absent
clone_name: test-container-new-archive-destroyed-clone
archive: true
archive_compression: gzip
- name: Start a cloned container.
lxc_container:
name: test-container-new-archive-destroyed-clone
state: started
- name: Destroy a container
lxc_container:
name: "{{ item }}"
state: absent
with_items:
- test-container-stopped
- test-container-started
- test-container-frozen
- test-container-lvm
- test-container-config
- test-container-overlayfs
- test-container-overlayfs-clone
- test-container-overlayfs-clone-snapshot
- test-container-overlayfs-clone-snapshot2
- test-container-new-archive
- test-container-new-archive-clone
- test-container-new-archive-destroyed-clone
"""
RETURN="""
lxc_container:
description: container information
returned: success
type: list
contains:
name:
description: name of the lxc container
returned: success
type: string
sample: test_host
init_pid:
description: pid of the lxc init process
returned: success
type: int
sample: 19786
interfaces:
description: list of the container's network interfaces
returned: success
type: list
sample: [ "eth0", "lo" ]
ips:
description: list of ips
returned: success
type: list
sample: [ "10.0.3.3" ]
state:
description: resulting state of the container
returned: success
type: string
sample: "running"
archive:
description: path to the created archive of the container
returned: success, when archive is true
type: string
sample: "/tmp/test-container-config.tar"
clone:
description: if the container was cloned
returned: success, when clone_name is specified
type: boolean
sample: True
"""
import re
try:
import lxc
except ImportError:
HAS_LXC = False
else:
HAS_LXC = True
# LXC_COMPRESSION_MAP is a map of available compression types when creating
# an archive of a container.
LXC_COMPRESSION_MAP = {
'gzip': {
'extension': 'tar.gz',
'argument': '-czf'
},
'bzip2': {
'extension': 'tar.bz2',
'argument': '-cjf'
},
'none': {
'extension': 'tar',
'argument': '-cf'
}
}
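# Illustrative sketch only (not executed by the module): for a hypothetical
# container named "web01" archived with gzip, _create_tar() combines these
# values roughly as follows:
#
#   ctype = LXC_COMPRESSION_MAP['gzip']
#   archive_name = '/opt/archives/web01.%s' % ctype['extension']
#   tar_command = ['tar', '--directory=/tmp/<tmpdir>/web01',
#                  ctype['argument'], archive_name, '.']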
# LXC_COMMAND_MAP is a map of variables that are available to a method based
# on the state the container is in.
LXC_COMMAND_MAP = {
'create': {
'variables': {
'config': '--config',
'template': '--template',
'backing_store': '--bdev',
'lxc_path': '--lxcpath',
'lv_name': '--lvname',
'vg_name': '--vgname',
'thinpool': '--thinpool',
'fs_type': '--fstype',
'fs_size': '--fssize',
'directory': '--dir',
'zfs_root': '--zfsroot'
}
},
'clone': {
'variables': {
'backing_store': '--backingstore',
'lxc_path': '--lxcpath',
'fs_size': '--fssize',
'name': '--orig',
'clone_name': '--new'
}
}
}
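# Illustrative sketch: given hypothetical module params such as
# template='ubuntu' and fs_size='5G', _get_vars() maps them through the
# 'create' variables above and _add_variables() appends the fragments
# '--template ubuntu' and '--fssize 5G' to the lxc-create command line.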
# LXC_BACKING_STORE is a map of available storage backends and options that
# are incompatible with the given storage backend.
LXC_BACKING_STORE = {
'dir': [
'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
],
'lvm': [
'zfs_root'
],
'btrfs': [
'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
],
'loop': [
'lv_name', 'vg_name', 'thinpool', 'zfs_root'
],
'overlayfs': [
'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
],
'zfs': [
'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
]
}
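# Illustrative note: with backing_store='dir', _get_vars() strips the
# incompatible options listed above, so hypothetical lv_name or fs_size
# parameters are silently dropped rather than passed to lxc-create.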
# LXC_LOGGING_LEVELS is a map of available log levels
LXC_LOGGING_LEVELS = {
'INFO': ['info', 'INFO', 'Info'],
'ERROR': ['error', 'ERROR', 'Error'],
'DEBUG': ['debug', 'DEBUG', 'Debug']
}
# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
# when a particular state is evoked.
LXC_ANSIBLE_STATES = {
'started': '_started',
'stopped': '_stopped',
'restarted': '_restarted',
'absent': '_destroyed',
'frozen': '_frozen',
'clone': '_clone'
}
# This is used to attach to a running container and execute commands from
# within the container on the host. This will provide local access to a
# container without using SSH. The template will attempt to work within the
# home directory of the user that was attached to the container and source
# that users environment variables by default.
ATTACH_TEMPLATE = """#!/usr/bin/env bash
pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
if [[ -f ".bashrc" ]];then
source .bashrc
fi
popd
# User defined command
%(container_command)s
"""
def create_script(command):
"""Write out a script onto a target.
This method should be backward compatible with Python 2.4+ when executing
from within the container.
:param command: command to run, this can be a script and can use spacing
with newlines as separation.
:type command: ``str``
"""
import os
import os.path as path
import subprocess
import tempfile
(fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
f = os.fdopen(fd, 'wb')
try:
f.write(ATTACH_TEMPLATE % {'container_command': command})
f.flush()
finally:
f.close()
# Ensure the script is executable.
os.chmod(script_file, int('0700',8))
# Output log file.
stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
# Error log file.
stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
# Execute the script command.
try:
subprocess.Popen(
[script_file],
stdout=stdout_file,
stderr=stderr_file
).communicate()
finally:
# Close the log files.
stderr_file.close()
stdout_file.close()
# Remove the script file upon completion of execution.
os.remove(script_file)
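# Usage sketch: create_script is handed to lxc's attach_wait so that it runs
# inside the container, e.g.
#   container.attach_wait(create_script, "apt-get update")
# as done by LxcContainerManagement._execute_command() below.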
class LxcContainerManagement(object):
def __init__(self, module):
"""Management of LXC containers via Ansible.
:param module: Processed Ansible Module.
:type module: ``object``
"""
self.module = module
self.state = self.module.params.get('state', None)
self.state_change = False
self.lxc_vg = None
self.container_name = self.module.params['name']
self.container = self.get_container_bind()
self.archive_info = None
self.clone_info = None
def get_container_bind(self):
return lxc.Container(name=self.container_name)
@staticmethod
def _roundup(num):
"""Return a rounded floating point number.
:param num: Number to round up.
:type: ``float``
:returns: Rounded up number.
:rtype: ``int``
"""
num, part = str(num).split('.')
num = int(num)
if int(part) != 0:
num += 1
return num
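# Illustrative examples: _roundup(5.0) returns 5; _roundup(5.01) returns 6.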
@staticmethod
def _container_exists(container_name):
"""Check if a container exists.
:param container_name: Name of the container.
:type: ``str``
:returns: True or False if the container is found.
:rtype: ``bool``
"""
return container_name in lxc.list_containers()
@staticmethod
def _add_variables(variables_dict, build_command):
"""Return a command list with all found options.
:param variables_dict: Pre-parsed optional variables used from a
seed command.
:type variables_dict: ``dict``
:param build_command: Command to run.
:type build_command: ``list``
:returns: list of command options.
:rtype: ``list``
"""
for key, value in variables_dict.items():
build_command.append(
'%s %s' % (key, value)
)
return build_command
def _get_vars(self, variables):
"""Return a dict of all variables as found within the module.
:param variables: Hash of all variables to find.
:type variables: ``dict``
"""
# Remove incompatible storage backend options.
variables = variables.copy()
for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
variables.pop(v, None)
return_dict = dict()
false_values = [None, ''] + BOOLEANS_FALSE
for k, v in variables.items():
_var = self.module.params.get(k)
if _var not in false_values:
return_dict[v] = _var
return return_dict
def _run_command(self, build_command, unsafe_shell=False, timeout=600):
"""Return information from running an Ansible Command.
This will squash the build command list into a string and then
execute the command via Ansible. The output is returned to the method.
This output is returned as `return_code`, `stdout`, `stderr`.
Prior to running the command the method will look to see if the LXC
lockfile is present. If the lockfile "/var/lock/subsys/lxc" exists, the
method will wait up to 10 minutes for it to be gone, polling every second.
:param build_command: Used for the command and all options.
:type build_command: ``list``
:param unsafe_shell: Enable or disable unsafe shell commands.
:type unsafe_shell: ``bool``
:param timeout: Time before the container create process quits.
:type timeout: ``int``
"""
lockfile = '/var/lock/subsys/lxc'
for _ in xrange(timeout):
if os.path.exists(lockfile):
time.sleep(1)
else:
return self.module.run_command(
' '.join(build_command),
use_unsafe_shell=unsafe_shell
)
else:
message = (
'The LXC subsystem is locked and after 10 minutes it never'
' became unlocked. Lockfile [ %s ]' % lockfile
)
self.failure(
error='LXC subsystem locked',
rc=0,
msg=message
)
def _config(self):
"""Configure an LXC container.
Write new configuration values to the lxc config file. This will
stop the container if it's running, write the new options, and then
restart the container upon completion.
"""
_container_config = self.module.params.get('container_config')
if not _container_config:
return False
container_config_file = self.container.config_file_name
with open(container_config_file, 'rb') as f:
container_config = f.readlines()
# Note: ast.literal_eval is used because AnsibleModule does not provide
# adequate dictionary parsing.
# Issue: https://github.com/ansible/ansible/issues/7679
# TODO(cloudnull) adjust import when issue has been resolved.
import ast
options_dict = ast.literal_eval(_container_config)
parsed_options = [i.split('=', 1) for i in options_dict]
config_change = False
for key, value in parsed_options:
key = key.strip()
value = value.strip()
new_entry = '%s = %s\n' % (key, value)
keyre = re.compile(r'%s(\s+)?=' % key)
for option_line in container_config:
# Look for key in config
if keyre.match(option_line):
_, _value = option_line.split('=', 1)
config_value = ' '.join(_value.split())
line_index = container_config.index(option_line)
# If the sanitized values don't match replace them
if value != config_value:
line_index += 1
if new_entry not in container_config:
config_change = True
container_config.insert(line_index, new_entry)
# Break the flow as values are written or not at this point
break
else:
config_change = True
container_config.append(new_entry)
# If the config changed restart the container.
if config_change:
container_state = self._get_state()
if container_state != 'stopped':
self.container.stop()
with open(container_config_file, 'wb') as f:
f.writelines(container_config)
self.state_change = True
if container_state == 'running':
self._container_startup()
elif container_state == 'frozen':
self._container_startup()
self.container.freeze()
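# Illustrative sketch of the rewrite above: a hypothetical container_config
# entry "lxc.aa_profile=unconfined" is normalized to the config line
# 'lxc.aa_profile = unconfined\n' and either inserted after an existing
# 'lxc.aa_profile' line whose value differs or appended to the file.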
def _container_create_clone(self):
"""Clone a new LXC container from an existing container.
This method will clone an existing container to a new container using
the `clone_name` variable as the new container name. The method will
create a container if the container `name` does not exist.
Note that cloning a container will ensure that the original container
is "stopped" before the clone can be done. Because this operation can
require a state change the method will return the original container
to its prior state upon completion of the clone.
Once the clone is complete the new container will be left in a stopped
state.
"""
# Ensure that the state of the original container is stopped
container_state = self._get_state()
if container_state != 'stopped':
self.state_change = True
self.container.stop()
build_command = [
self.module.get_bin_path('lxc-clone', True),
]
build_command = self._add_variables(
variables_dict=self._get_vars(
variables=LXC_COMMAND_MAP['clone']['variables']
),
build_command=build_command
)
# Add the snapshot argument when the clone is to be created from a snapshot.
if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
build_command.append('--snapshot')
# Check for backing_store == overlayfs; if so, force the use of snapshot.
# If overlayfs is used and snapshot is unset the clone command will
# fail with an unsupported type.
elif self.module.params.get('backing_store') == 'overlayfs':
build_command.append('--snapshot')
rc, return_data, err = self._run_command(build_command)
if rc != 0:
message = "Failed executing lxc-clone."
self.failure(
err=err, rc=rc, msg=message, command=' '.join(
build_command
)
)
else:
self.state_change = True
# Restore the original state of the origin container if it was
# not in a stopped state.
if container_state == 'running':
self.container.start()
elif container_state == 'frozen':
self.container.start()
self.container.freeze()
return True
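# Illustrative sketch: for a hypothetical container "test" cloned to
# "test-clone" with clone_snapshot enabled, the assembled command is roughly:
#   lxc-clone --orig test --new test-clone --snapshot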
def _create(self):
"""Create a new LXC container.
This method will build and execute a shell command to build the
container. It would have been nice to simply use the lxc python library;
however, at the time this was written the python library, in both py2
and py3, didn't support some of the more advanced container create
processes. These missing processes mainly revolve around backing
LXC containers with block devices.
"""
build_command = [
self.module.get_bin_path('lxc-create', True),
'--name %s' % self.container_name,
'--quiet'
]
build_command = self._add_variables(
variables_dict=self._get_vars(
variables=LXC_COMMAND_MAP['create']['variables']
),
build_command=build_command
)
# Load logging for the instance when creating it.
if self.module.params.get('container_log') in BOOLEANS_TRUE:
# Set the logging path to /var/log/lxc if the uid is root, else
# set it to the home folder of the executing user.
try:
if os.getuid() != 0:
log_path = os.getenv('HOME')
else:
if not os.path.isdir('/var/log/lxc/'):
os.makedirs('/var/log/lxc/')
log_path = '/var/log/lxc/'
except OSError:
log_path = os.getenv('HOME')
build_command.extend([
'--logfile %s' % os.path.join(
log_path, 'lxc-%s.log' % self.container_name
),
'--logpriority %s' % self.module.params.get(
'container_log_level'
).upper()
])
# Add the template commands to the end of the command if there are any
template_options = self.module.params.get('template_options', None)
if template_options:
build_command.append('-- %s' % template_options)
rc, return_data, err = self._run_command(build_command)
if rc != 0:
message = "Failed executing lxc-create."
self.failure(
err=err, rc=rc, msg=message, command=' '.join(build_command)
)
else:
self.state_change = True
def _container_data(self):
"""Returns a dict of container information.
:returns: container data
:rtype: ``dict``
"""
return {
'interfaces': self.container.get_interfaces(),
'ips': self.container.get_ips(),
'state': self._get_state(),
'init_pid': int(self.container.init_pid),
'name': self.container_name,
}
def _unfreeze(self):
"""Unfreeze a container.
:returns: True or False based on if the container was unfrozen.
:rtype: ``bool``
"""
unfreeze = self.container.unfreeze()
if unfreeze:
self.state_change = True
return unfreeze
def _get_state(self):
"""Return the state of a container.
If the container is not found the state returned is "absent"
:returns: state of a container as a lower case string.
:rtype: ``str``
"""
if self._container_exists(container_name=self.container_name):
return str(self.container.state).lower()
else:
return 'absent'
def _execute_command(self):
"""Execute a shell command."""
container_command = self.module.params.get('container_command')
if container_command:
container_state = self._get_state()
if container_state == 'frozen':
self._unfreeze()
elif container_state == 'stopped':
self._container_startup()
self.container.attach_wait(create_script, container_command)
self.state_change = True
def _container_startup(self, timeout=60):
"""Ensure a container is started.
:param timeout: Time before the start operation is abandoned.
:type timeout: ``int``
"""
self.container = self.get_container_bind()
for _ in xrange(timeout):
if self._get_state() != 'running':
self.container.start()
self.state_change = True
# post startup sleep for 1 second.
time.sleep(1)
else:
return True
else:
self.failure(
lxc_container=self._container_data(),
error='Failed to start container'
' [ %s ]' % self.container_name,
rc=1,
msg='The container [ %s ] failed to start. Check that lxc is'
' available and that the container is in a functional'
' state.' % self.container_name
)
def _check_archive(self):
"""Create a compressed archive of a container.
This will store the archive info as self.archive_info.
"""
if self.module.params.get('archive') in BOOLEANS_TRUE:
self.archive_info = {
'archive': self._container_create_tar()
}
def _check_clone(self):
"""Create a compressed archive of a container.
This will store archive_info in as self.archive_info
"""
clone_name = self.module.params.get('clone_name')
if clone_name:
if not self._container_exists(container_name=clone_name):
self.clone_info = {
'cloned': self._container_create_clone()
}
else:
self.clone_info = {
'cloned': False
}
def _destroyed(self, timeout=60):
"""Ensure a container is destroyed.
:param timeout: Time before the destroy operation is abandoned.
:type timeout: ``int``
"""
for _ in xrange(timeout):
if not self._container_exists(container_name=self.container_name):
break
# Check if the container needs to have an archive created.
self._check_archive()
# Check if the container is to be cloned
self._check_clone()
if self._get_state() != 'stopped':
self.state_change = True
self.container.stop()
if self.container.destroy():
self.state_change = True
# post destroy attempt sleep for 1 second.
time.sleep(1)
else:
self.failure(
lxc_container=self._container_data(),
error='Failed to destroy container'
' [ %s ]' % self.container_name,
rc=1,
msg='The container [ %s ] failed to be destroyed. Check'
' that lxc is available and that the container is in a'
' functional state.' % self.container_name
)
def _frozen(self, count=0):
"""Ensure a container is frozen.
If the container does not exist the container will be created.
:param count: number of times this command has been called by itself.
:type count: ``int``
"""
self.check_count(count=count, method='frozen')
if self._container_exists(container_name=self.container_name):
self._execute_command()
# Perform any configuration updates
self._config()
container_state = self._get_state()
if container_state == 'frozen':
pass
elif container_state == 'running':
self.container.freeze()
self.state_change = True
else:
self._container_startup()
self.container.freeze()
self.state_change = True
# Check if the container needs to have an archive created.
self._check_archive()
# Check if the container is to be cloned
self._check_clone()
else:
self._create()
count += 1
self._frozen(count)
def _restarted(self, count=0):
"""Ensure a container is restarted.
If the container does not exist the container will be created.
:param count: number of times this command has been called by itself.
:type count: ``int``
"""
self.check_count(count=count, method='restart')
if self._container_exists(container_name=self.container_name):
self._execute_command()
# Perform any configuration updates
self._config()
if self._get_state() != 'stopped':
self.container.stop()
self.state_change = True
# Run container startup
self._container_startup()
# Check if the container needs to have an archive created.
self._check_archive()
# Check if the container is to be cloned
self._check_clone()
else:
self._create()
count += 1
self._restarted(count)
def _stopped(self, count=0):
"""Ensure a container is stopped.
If the container does not exist the container will be created.
:param count: number of times this command has been called by itself.
:type count: ``int``
"""
self.check_count(count=count, method='stop')
if self._container_exists(container_name=self.container_name):
self._execute_command()
# Perform any configuration updates
self._config()
if self._get_state() != 'stopped':
self.container.stop()
self.state_change = True
# Check if the container needs to have an archive created.
self._check_archive()
# Check if the container is to be cloned
self._check_clone()
else:
self._create()
count += 1
self._stopped(count)
def _started(self, count=0):
"""Ensure a container is started.
If the container does not exist the container will be created.
:param count: number of times this command has been called by itself.
:type count: ``int``
"""
self.check_count(count=count, method='start')
if self._container_exists(container_name=self.container_name):
container_state = self._get_state()
if container_state == 'running':
pass
elif container_state == 'frozen':
self._unfreeze()
elif not self._container_startup():
self.failure(
lxc_container=self._container_data(),
error='Failed to start container'
' [ %s ]' % self.container_name,
rc=1,
msg='The container [ %s ] failed to start. Check that lxc is'
' available and that the container is in a functional'
' state.' % self.container_name
)
# Return data
self._execute_command()
# Perform any configuration updates
self._config()
# Check if the container needs to have an archive created.
self._check_archive()
# Check if the container is to be cloned
self._check_clone()
else:
self._create()
count += 1
self._started(count)
def _get_lxc_vg(self):
"""Return the name of the Volume Group used in LXC."""
build_command = [
self.module.get_bin_path('lxc-config', True),
"lxc.bdev.lvm.vg"
]
rc, vg, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='Failed to read LVM VG from LXC config',
command=' '.join(build_command)
)
else:
return str(vg.strip())
def _lvm_lv_list(self):
"""Return a list of all lv in a current vg."""
vg = self._get_lxc_vg()
build_command = [
self.module.get_bin_path('lvs', True)
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='Failed to get list of LVs',
command=' '.join(build_command)
)
all_lvms = [i.split() for i in stdout.splitlines()][1:]
return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
def _get_vg_free_pe(self, vg_name):
"""Return the available size of a given VG.
:param vg_name: Name of the volume group.
:type vg_name: ``str``
:returns: free size and unit of the volume group
:rtype: ``tuple``
"""
build_command = [
'vgdisplay',
vg_name,
'--units',
'g'
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to read vg %s' % vg_name,
command=' '.join(build_command)
)
vg_info = [i.strip() for i in stdout.splitlines()][1:]
free_pe = [i for i in vg_info if i.startswith('Free')]
_free_pe = free_pe[0].split()
return float(_free_pe[-2]), _free_pe[-1]
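# Illustrative parse (hypothetical output): a 'vgdisplay lxc --units g' line
# such as 'Free  PE / Size       1280 / 5.00 GiB' yields the tuple
# (5.0, 'GiB') from the method above.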
def _get_lv_size(self, lv_name):
"""Return the available size of a given LV.
:param lv_name: Name of the logical volume.
:type lv_name: ``str``
:returns: size and unit of the LV
:rtype: ``tuple``
"""
vg = self._get_lxc_vg()
lv = os.path.join(vg, lv_name)
build_command = [
'lvdisplay',
lv,
'--units',
'g'
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to read lv %s' % lv,
command=' '.join(build_command)
)
lv_info = [i.strip() for i in stdout.splitlines()][1:]
_free_pe = [i for i in lv_info if i.startswith('LV Size')]
free_pe = _free_pe[0].split()
return self._roundup(float(free_pe[-2])), free_pe[-1]
def _lvm_snapshot_create(self, source_lv, snapshot_name,
snapshot_size_gb=5):
"""Create an LVM snapshot.
:param source_lv: Name of lv to snapshot
:type source_lv: ``str``
:param snapshot_name: Name of lv snapshot
:type snapshot_name: ``str``
:param snapshot_size_gb: Size of snapshot to create
:type snapshot_size_gb: ``int``
"""
vg = self._get_lxc_vg()
free_space, measurement = self._get_vg_free_pe(vg_name=vg)
if free_space < float(snapshot_size_gb):
message = (
'Snapshot size [ %s ] is greater than the free space [ %s ] on volume group'
' [ %s ]' % (snapshot_size_gb, free_space, vg)
)
self.failure(
error='Not enough space to create snapshot',
rc=2,
msg=message
)
# Create LVM Snapshot
build_command = [
self.module.get_bin_path('lvcreate', True),
"-n",
snapshot_name,
"-s",
os.path.join(vg, source_lv),
"-L%sg" % snapshot_size_gb
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='Failed to Create LVM snapshot %s/%s --> %s'
% (vg, source_lv, snapshot_name)
)
def _lvm_lv_mount(self, lv_name, mount_point):
"""mount an lv.
:param lv_name: name of the logical volume to mount
:type lv_name: ``str``
:param mount_point: path on the file system that is mounted.
:type mount_point: ``str``
"""
vg = self._get_lxc_vg()
build_command = [
self.module.get_bin_path('mount', True),
"/dev/%s/%s" % (vg, lv_name),
mount_point,
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to mount LVM LV %s/%s to %s'
% (vg, lv_name, mount_point)
)
def _create_tar(self, source_dir):
"""Create an archive of a given ``source_dir`` to ``output_path``.
:param source_dir: Path to the directory to be archived.
:type source_dir: ``str``
"""
old_umask = os.umask(int('0077',8))
archive_path = self.module.params.get('archive_path')
if not os.path.isdir(archive_path):
os.makedirs(archive_path)
archive_compression = self.module.params.get('archive_compression')
compression_type = LXC_COMPRESSION_MAP[archive_compression]
# Build the archive file name from the archive path, the container
# name, and the compression extension.
archive_name = '%s.%s' % (
os.path.join(
archive_path,
self.container_name
),
compression_type['extension']
)
build_command = [
self.module.get_bin_path('tar', True),
'--directory=%s' % os.path.realpath(
os.path.expanduser(source_dir)
),
compression_type['argument'],
archive_name,
'.'
]
rc, stdout, err = self._run_command(
build_command=build_command,
unsafe_shell=True
)
os.umask(old_umask)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to create tar archive',
command=' '.join(build_command)
)
return archive_name
def _lvm_lv_remove(self, lv_name):
"""Remove an LV.
:param lv_name: The name of the logical volume
:type lv_name: ``str``
"""
vg = self._get_lxc_vg()
build_command = [
self.module.get_bin_path('lvremove', True),
"-f",
"%s/%s" % (vg, lv_name),
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
command=' '.join(build_command)
)
def _rsync_data(self, container_path, temp_dir):
"""Sync the container directory to the temp directory.
:param container_path: path to the container rootfs
:type container_path: ``str``
:param temp_dir: path to the temporary local working directory
:type temp_dir: ``str``
"""
# This loop is created to support overlayfs archives. This should
# squash all of the layers into a single archive.
fs_paths = container_path.split(':')
if 'overlayfs' in fs_paths:
fs_paths.pop(fs_paths.index('overlayfs'))
for fs_path in fs_paths:
# Set the path to the container data
fs_path = os.path.dirname(fs_path)
# Run the sync command
build_command = [
self.module.get_bin_path('rsync', True),
'-aHAX',
fs_path,
temp_dir
]
rc, stdout, err = self._run_command(
build_command,
unsafe_shell=True
)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to perform archive',
command=' '.join(build_command)
)
def _unmount(self, mount_point):
"""Unmount a file system.
:param mount_point: path on the file system that is mounted.
:type mount_point: ``str``
"""
build_command = [
self.module.get_bin_path('umount', True),
mount_point,
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to unmount [ %s ]' % mount_point,
command=' '.join(build_command)
)
def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
"""mount an lv.
:param lowerdir: name/path of the lower directory
:type lowerdir: ``str``
:param upperdir: name/path of the upper directory
:type upperdir: ``str``
:param mount_point: path on the file system that is mounted.
:type mount_point: ``str``
"""
build_command = [
self.module.get_bin_path('mount', True),
'-t overlayfs',
'-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
'overlayfs',
mount_point,
]
rc, stdout, err = self._run_command(build_command)
if rc != 0:
self.failure(
err=err,
rc=rc,
msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
% (lowerdir, upperdir, mount_point, build_command)
)
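# Illustrative sketch: for a hypothetical overlayfs-backed container the
# assembled command is roughly:
#   mount -t overlayfs -o lowerdir=<lower>,upperdir=<upper> overlayfs <mnt>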
def _container_create_tar(self):
"""Create a tar archive from an LXC container.
The process is as follows:
* Stop or Freeze the container
* Create temporary dir
* Copy container and config to temporary directory
* If LVM backed:
* Create LVM snapshot of LV backing the container
* Mount the snapshot to tmpdir/rootfs
* Restore the state of the container
* Create tar of tmpdir
* Clean up
"""
# Create a temp dir
temp_dir = tempfile.mkdtemp()
# Set the name of the working dir, temp + container_name
work_dir = os.path.join(temp_dir, self.container_name)
# LXC container rootfs
lxc_rootfs = self.container.get_config_item('lxc.rootfs')
# Test if the containers rootfs is a block device
block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
# Test if the container is using overlayfs
overlayfs_backed = lxc_rootfs.startswith('overlayfs')
mount_point = os.path.join(work_dir, 'rootfs')
# Set the snapshot name if needed
snapshot_name = '%s_lxc_snapshot' % self.container_name
container_state = self._get_state()
try:
# Ensure the original container is stopped or frozen
if container_state not in ['stopped', 'frozen']:
if container_state == 'running':
self.container.freeze()
else:
self.container.stop()
# Sync the container data from the container_path to work_dir
self._rsync_data(lxc_rootfs, temp_dir)
if block_backed:
if snapshot_name not in self._lvm_lv_list():
if not os.path.exists(mount_point):
os.makedirs(mount_point)
# Take snapshot
size, measurement = self._get_lv_size(
lv_name=self.container_name
)
self._lvm_snapshot_create(
source_lv=self.container_name,
snapshot_name=snapshot_name,
snapshot_size_gb=size
)
# Mount snapshot
self._lvm_lv_mount(
lv_name=snapshot_name,
mount_point=mount_point
)
else:
self.failure(
err='snapshot [ %s ] already exists' % snapshot_name,
rc=1,
msg='The snapshot [ %s ] already exists. Please clean'
' up old snapshot of containers before continuing.'
% snapshot_name
)
elif overlayfs_backed:
lowerdir, upperdir = lxc_rootfs.split(':')[1:]
self._overlayfs_mount(
lowerdir=lowerdir,
upperdir=upperdir,
mount_point=mount_point
)
# Set the state as changed and set a new fact
self.state_change = True
return self._create_tar(source_dir=work_dir)
finally:
if block_backed or overlayfs_backed:
# unmount snapshot
self._unmount(mount_point)
if block_backed:
# Remove snapshot
self._lvm_lv_remove(snapshot_name)
# Restore original state of container
if container_state == 'running':
if self._get_state() == 'frozen':
self.container.unfreeze()
else:
self.container.start()
# Remove tmpdir
shutil.rmtree(temp_dir)
def check_count(self, count, method):
if count > 1:
self.failure(
error='Failed to %s container' % method,
rc=1,
msg='The container [ %s ] failed to %s. Check that lxc is'
' available and that the container is in a functional'
' state.' % (self.container_name, method)
)
def failure(self, **kwargs):
"""Return a Failure when running an Ansible command.
:param error: ``str`` Error that occurred.
:param rc: ``int`` Return code while executing an Ansible command.
:param msg: ``str`` Message to report.
"""
self.module.fail_json(**kwargs)
def run(self):
"""Run the main method."""
action = getattr(self, LXC_ANSIBLE_STATES[self.state])
action()
outcome = self._container_data()
if self.archive_info:
outcome.update(self.archive_info)
if self.clone_info:
outcome.update(self.clone_info)
self.module.exit_json(
changed=self.state_change,
lxc_container=outcome
)
def main():
"""Ansible Main module."""
module = AnsibleModule(
argument_spec=dict(
name=dict(
type='str',
required=True
),
template=dict(
type='str',
default='ubuntu'
),
backing_store=dict(
type='str',
choices=LXC_BACKING_STORE.keys(),
default='dir'
),
template_options=dict(
type='str'
),
config=dict(
type='path',
),
vg_name=dict(
type='str',
default='lxc'
),
thinpool=dict(
type='str'
),
fs_type=dict(
type='str',
default='ext4'
),
fs_size=dict(
type='str',
default='5G'
),
directory=dict(
type='path'
),
zfs_root=dict(
type='str'
),
lv_name=dict(
type='str'
),
lxc_path=dict(
type='path'
),
state=dict(
choices=LXC_ANSIBLE_STATES.keys(),
default='started'
),
container_command=dict(
type='str'
),
container_config=dict(
type='str'
),
container_log=dict(
type='bool',
default='false'
),
container_log_level=dict(
choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
default='INFO'
),
clone_name=dict(
type='str',
required=False
),
clone_snapshot=dict(
type='bool',
default='false'
),
archive=dict(
type='bool',
default='false'
),
archive_path=dict(
type='path',
),
archive_compression=dict(
choices=LXC_COMPRESSION_MAP.keys(),
default='gzip'
)
),
supports_check_mode=False,
required_if=[
('archive', True, ['archive_path'])
],
)
if not HAS_LXC:
module.fail_json(
msg='The `lxc` module is not importable. Check the requirements.'
)
lv_name = module.params.get('lv_name')
if not lv_name:
module.params['lv_name'] = module.params.get('name')
lxc_manage = LxcContainerManagement(module=module)
lxc_manage.run()
# import module bits
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
FusionSP/android_external_chromium_org | tools/telemetry/telemetry/core/backends/form_based_credentials_backend_unittest_base.py | 26 | 4673 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import unittest
from telemetry import benchmark
from telemetry.core import browser_finder
from telemetry.core import util
from telemetry.unittest import options_for_unittests
from telemetry.unittest import simple_mock
_ = simple_mock.DONT_CARE
def _GetCredentialsPath():
# TODO: This shouldn't depend on tools/perf.
credentials_path = os.path.join(util.GetChromiumSrcDir(),
'tools', 'perf', 'data', 'credentials.json')
if not os.path.exists(credentials_path):
return None
return credentials_path
class FormBasedCredentialsBackendUnitTestBase(unittest.TestCase):
def setUp(self):
self._credentials_type = None
@benchmark.Disabled
def testRealLoginIfPossible(self):
credentials_path = _GetCredentialsPath()
if not credentials_path:
logging.warning('Credentials file not found, skipping test.')
return
options = options_for_unittests.GetCopy()
with browser_finder.FindBrowser(options).Create() as b:
b.credentials.credentials_path = credentials_path
if not b.credentials.CanLogin(self._credentials_type):
return
ret = b.credentials.LoginNeeded(b.tabs[0], self._credentials_type)
self.assertTrue(ret)
@benchmark.Disabled
def testRealLoginWithDontOverrideProfileIfPossible(self):
credentials_path = _GetCredentialsPath()
if not credentials_path:
logging.warning('Credentials file not found, skipping test.')
return
options = options_for_unittests.GetCopy()
# Login once to make sure our default profile is logged in.
with browser_finder.FindBrowser(options).Create() as b:
b.credentials.credentials_path = credentials_path
if not b.credentials.CanLogin(self._credentials_type):
return
tab = b.tabs[0]
# Should not be logged in, since this is a fresh credentials
# instance.
self.assertFalse(b.credentials.IsLoggedIn(self._credentials_type))
# Log in.
ret = b.credentials.LoginNeeded(tab, self._credentials_type)
# Make sure login was successful.
self.assertTrue(ret)
self.assertTrue(b.credentials.IsLoggedIn(self._credentials_type))
# Reset state. Now the backend thinks we're logged out, even
# though we are logged in in our current browser session. This
# simulates the effects of running with --dont-override-profile.
b.credentials._ResetLoggedInState() # pylint: disable=W0212
# Make sure the backend thinks we're logged out.
self.assertFalse(b.credentials.IsLoggedIn(self._credentials_type))
self.assertTrue(b.credentials.CanLogin(self._credentials_type))
# Attempt to login again. This should detect that we've hit
# the 'logged in' page instead of the login form, and succeed
# instead of timing out.
ret = b.credentials.LoginNeeded(tab, self._credentials_type)
# Make sure our login attempt did in fact succeed and set the
# backend's internal state to 'logged in'.
self.assertTrue(ret)
self.assertTrue(b.credentials.IsLoggedIn(self._credentials_type))
def testLoginUsingMock(self):
raise NotImplementedError()
def _LoginUsingMock(self, backend, login_page_url, email_element_id,
password_element_id, form_element_id,
already_logged_in_js): # pylint: disable=R0201
tab = simple_mock.MockObject()
ar = simple_mock.MockObject()
config = {'username': 'blah',
'password': 'blargh'}
tab.ExpectCall('Navigate', login_page_url)
tab.ExpectCall('EvaluateJavaScript', already_logged_in_js).WillReturn(False)
tab.ExpectCall('WaitForDocumentReadyStateToBeInteractiveOrBetter')
ar.ExpectCall('WaitForJavaScriptCondition',
'(document.querySelector("#%s") !== null) || (%s)' % (
form_element_id, already_logged_in_js), 60)
ar.ExpectCall('WaitForNavigate')
def VerifyEmail(js):
assert email_element_id in js
assert 'blah' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifyEmail)
def VerifyPw(js):
assert password_element_id in js
assert 'blargh' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifyPw)
def VerifySubmit(js):
assert '.submit' in js or '.click' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifySubmit)
# Checking for form still up.
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(False)
backend.LoginNeeded(tab, ar, config)
| bsd-3-clause |
vietch2612/phantomjs | src/qt/qtbase/src/3rdparty/freetype/src/tools/docmaker/content.py | 293 | 17668 | # Content (c) 2002, 2004, 2006, 2007, 2008, 2009
# David Turner <david@freetype.org>
#
# This file contains routines used to parse the content of documentation
# comment blocks and build more structured objects out of them.
#
from sources import *
from utils import *
import string, re
# this regular expression is used to detect code sequences. these
# are simply code fragments embedded in '{' and '}' like in:
#
# {
# x = y + z;
# if ( zookoo == 2 )
# {
# foobar();
# }
# }
#
# note that indentation of the starting and ending braces must be
# exactly the same. the code sequence can contain braces at greater
# indentation
#
re_code_start = re.compile( r"(\s*){\s*$" )
re_code_end = re.compile( r"(\s*)}\s*$" )
# this regular expression is used to isolate identifiers from
# other text
#
re_identifier = re.compile( r'(\w*)' )
# we collect macros ending in `_H'; while outputting the object data, we use
# this info together with the object's file location to emit the appropriate
# header file macro and name before the object itself
#
re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )
#############################################################################
#
# The DocCode class is used to store source code lines.
#
# 'self.lines' contains a set of source code lines that will be dumped as
# HTML in a <PRE> tag.
#
# The object is filled line by line by the parser; it strips the leading
# "margin" space from each input line before storing it in 'self.lines'.
#
class DocCode:
def __init__( self, margin, lines ):
self.lines = []
self.words = None
# remove margin spaces
for l in lines:
if string.strip( l[:margin] ) == "":
l = l[margin:]
self.lines.append( l )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
result = []
for l in self.lines:
result.append( " " * margin + l )
return result
#############################################################################
#
# The DocPara class is used to store "normal" text paragraph.
#
# 'self.words' contains the list of words that make up the paragraph
#
class DocPara:
def __init__( self, lines ):
self.lines = None
self.words = []
for l in lines:
l = string.strip( l )
self.words.extend( string.split( l ) )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
cur = "" # current line
col = 0 # current width
result = []
for word in self.words:
ln = len( word )
if col > 0:
ln = ln + 1
if col + ln > width:
result.append( " " * margin + cur )
cur = word
col = len( word )
else:
if col > 0:
cur = cur + " "
cur = cur + word
col = col + ln
if col > 0:
result.append( " " * margin + cur )
return result
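# illustrative example: DocPara( ["hello world foo"] ).dump_lines( 2, 10 )
# returns [ "  hello", "  world foo" ] -- the words are re-wrapped to the
# requested width with the margin prepended to each emitted line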
#############################################################################
#
# The DocField class is used to store a list containing either DocPara or
# DocCode objects. Each DocField also has an optional "name" which is used
# when the object corresponds to a field or value definition
#
class DocField:
def __init__( self, name, lines ):
self.name = name # can be None for normal paragraphs/sources
self.items = [] # list of items
mode_none = 0 # start parsing mode
mode_code = 1 # parsing code sequences
mode_para = 3 # parsing normal paragraph
margin = -1 # current code sequence indentation
cur_lines = []
# now analyze the markup lines to see if they contain paragraphs,
# code sequences or fields definitions
#
start = 0
mode = mode_none
for l in lines:
# are we parsing a code sequence ?
if mode == mode_code:
m = re_code_end.match( l )
if m and len( m.group( 1 ) ) <= margin:
# that's it, we finished the code sequence
code = DocCode( 0, cur_lines )
self.items.append( code )
margin = -1
cur_lines = []
mode = mode_none
else:
# nope, continue the code sequence
cur_lines.append( l[margin:] )
else:
# start of code sequence ?
m = re_code_start.match( l )
if m:
# save current lines
if cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
# switch to code extraction mode
margin = len( m.group( 1 ) )
mode = mode_code
else:
if not string.split( l ) and cur_lines:
# if the line is empty, we end the current paragraph,
# if any
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
else:
# otherwise, simply add the line to the current
# paragraph
cur_lines.append( l )
if mode == mode_code:
# unexpected end of code sequence
code = DocCode( margin, cur_lines )
self.items.append( code )
elif cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
def dump( self, prefix = "" ):
if self.name:
print prefix + self.name + " ::"
prefix = prefix + "----"
first = 1
for p in self.items:
if not first:
print ""
p.dump( prefix )
first = 0
def dump_lines( self, margin = 0, width = 60 ):
result = []
nl = None
for p in self.items:
if nl:
result.append( "" )
result.extend( p.dump_lines( margin, width ) )
nl = 1
return result
# this regular expression is used to detect field definitions
#
re_field = re.compile( r"\s*(\w*|\w(\w|\.)*\w)\s*::" )
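# e.g. the line " foo.bar :: some text" is matched with
# group( 1 ) == "foo.bar"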
class DocMarkup:
def __init__( self, tag, lines ):
self.tag = string.lower( tag )
self.fields = []
cur_lines = []
field = None
mode = 0
for l in lines:
m = re_field.match( l )
if m:
# we detected the start of a new field definition
# first, save the current one
if cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
cur_lines = []
field = None
field = m.group( 1 ) # record field name
ln = len( m.group( 0 ) )
l = " " * ln + l[ln:]
cur_lines = [l]
else:
cur_lines.append( l )
if field or cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
def get_name( self ):
try:
return self.fields[0].items[0].words[0]
except:
return None
def get_start( self ):
try:
result = ""
for word in self.fields[0].items[0].words:
result = result + " " + word
return result[1:]
except:
return "ERROR"
def dump( self, margin ):
print " " * margin + "<" + self.tag + ">"
for f in self.fields:
f.dump( " " )
print " " * margin + "</" + self.tag + ">"
class DocChapter:
def __init__( self, block ):
self.block = block
self.sections = []
if block:
self.name = block.name
self.title = block.get_markup_words( "title" )
self.order = block.get_markup_words( "sections" )
else:
self.name = "Other"
self.title = string.split( "Miscellaneous" )
self.order = []
class DocSection:
def __init__( self, name = "Other" ):
self.name = name
self.blocks = {}
self.block_names = [] # ordered block names in section
self.defs = []
self.abstract = ""
self.description = ""
self.order = []
self.title = "ERROR"
self.chapter = None
def add_def( self, block ):
self.defs.append( block )
def add_block( self, block ):
self.block_names.append( block.name )
self.blocks[block.name] = block
def process( self ):
# look up one block that contains a valid section description
for block in self.defs:
title = block.get_markup_text( "title" )
if title:
self.title = title
self.abstract = block.get_markup_words( "abstract" )
self.description = block.get_markup_items( "description" )
self.order = block.get_markup_words( "order" )
return
def reorder( self ):
self.block_names = sort_order_list( self.block_names, self.order )
class ContentProcessor:
def __init__( self ):
"""initialize a block content processor"""
self.reset()
self.sections = {} # dictionary of documentation sections
self.section = None # current documentation section
self.chapters = [] # list of chapters
self.headers = {} # dictionary of header macros
def set_section( self, section_name ):
"""set current section during parsing"""
if not self.sections.has_key( section_name ):
section = DocSection( section_name )
self.sections[section_name] = section
self.section = section
else:
self.section = self.sections[section_name]
def add_chapter( self, block ):
chapter = DocChapter( block )
self.chapters.append( chapter )
def reset( self ):
"""reset the content processor for a new block"""
self.markups = []
self.markup = None
self.markup_lines = []
def add_markup( self ):
"""add a new markup section"""
if self.markup and self.markup_lines:
# get rid of last line of markup if it's empty
marks = self.markup_lines
if len( marks ) > 0 and not string.strip( marks[-1] ):
self.markup_lines = marks[:-1]
m = DocMarkup( self.markup, self.markup_lines )
self.markups.append( m )
self.markup = None
self.markup_lines = []
def process_content( self, content ):
"""process a block content and return a list of DocMarkup objects
corresponding to it"""
markup = None
markup_lines = []
first = 1
for line in content:
found = None
for t in re_markup_tags:
m = t.match( line )
if m:
found = string.lower( m.group( 1 ) )
prefix = len( m.group( 0 ) )
line = " " * prefix + line[prefix:] # remove markup from line
break
# is it the start of a new markup section ?
if found:
first = 0
self.add_markup() # add current markup content
self.markup = found
if len( string.strip( line ) ) > 0:
self.markup_lines.append( line )
elif first == 0:
self.markup_lines.append( line )
self.add_markup()
return self.markups
def parse_sources( self, source_processor ):
blocks = source_processor.blocks
count = len( blocks )
for n in range( count ):
source = blocks[n]
if source.content:
# this is a documentation comment, we need to catch
# all following normal blocks in the "follow" list
#
follow = []
m = n + 1
while m < count and not blocks[m].content:
follow.append( blocks[m] )
m = m + 1
doc_block = DocBlock( source, follow, self )
def finish( self ):
# process all sections to extract their abstract, description
# and ordered list of items
#
for sec in self.sections.values():
sec.process()
# process chapters to check that all sections are correctly
# listed there
for chap in self.chapters:
for sec in chap.order:
if self.sections.has_key( sec ):
section = self.sections[sec]
section.chapter = chap
section.reorder()
chap.sections.append( section )
else:
sys.stderr.write( "WARNING: chapter '" + \
chap.name + "' in " + chap.block.location() + \
" lists unknown section '" + sec + "'\n" )
# check that all sections are in a chapter
#
others = []
for sec in self.sections.values():
if not sec.chapter:
others.append( sec )
# create a new special chapter for all remaining sections
# when necessary
#
if others:
chap = DocChapter( None )
chap.sections = others
self.chapters.append( chap )
class DocBlock:
def __init__( self, source, follow, processor ):
processor.reset()
self.source = source
self.code = []
self.type = "ERRTYPE"
self.name = "ERRNAME"
self.section = processor.section
self.markups = processor.process_content( source.content )
# compute block type from first markup tag
try:
self.type = self.markups[0].tag
except:
pass
# compute block name from first markup paragraph
try:
markup = self.markups[0]
para = markup.fields[0].items[0]
name = para.words[0]
m = re_identifier.match( name )
if m:
name = m.group( 1 )
self.name = name
except:
pass
if self.type == "section":
# detect new section starts
processor.set_section( self.name )
processor.section.add_def( self )
elif self.type == "chapter":
# detect new chapter
processor.add_chapter( self )
else:
processor.section.add_block( self )
# now, compute the source lines relevant to this documentation
# block. We keep normal comments in for obvious reasons (??)
source = []
for b in follow:
if b.format:
break
for l in b.lines:
# collect header macro definitions
m = re_header_macro.match( l )
if m:
processor.headers[m.group( 2 )] = m.group( 1 )
# we use "/* */" as a separator
if re_source_sep.match( l ):
break
source.append( l )
# now strip the leading and trailing empty lines from the sources
start = 0
end = len( source ) - 1
while start < end and not string.strip( source[start] ):
start = start + 1
while start < end and not string.strip( source[end] ):
end = end - 1
if start == end and not string.strip( source[start] ):
self.code = []
else:
self.code = source[start:end + 1]
def location( self ):
return self.source.location()
def get_markup( self, tag_name ):
"""return the DocMarkup corresponding to a given tag in a block"""
for m in self.markups:
if m.tag == string.lower( tag_name ):
return m
return None
def get_markup_name( self, tag_name ):
"""return the name of a given primary markup in a block"""
try:
m = self.get_markup( tag_name )
return m.get_name()
except:
return None
def get_markup_words( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items[0].words
except:
return []
def get_markup_text( self, tag_name ):
result = self.get_markup_words( tag_name )
return string.join( result )
def get_markup_items( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items
except:
return None
# eof
| bsd-3-clause |
u2009cf/spark-radar | examples/src/main/python/mllib/random_rdd_generation.py | 85 | 1940 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Randomly generated RDDs.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.mllib.random import RandomRDDs
if __name__ == "__main__":
if len(sys.argv) not in [1, 2]:
print("Usage: random_rdd_generation", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PythonRandomRDDGeneration")
numExamples = 10000 # number of examples to generate
fraction = 0.1 # fraction of data to sample
# Example: RandomRDDs.normalRDD
normalRDD = RandomRDDs.normalRDD(sc, numExamples)
print('Generated RDD of %d examples sampled from the standard normal distribution'
% normalRDD.count())
print(' First 5 samples:')
for sample in normalRDD.take(5):
print(' ' + str(sample))
print()
# Example: RandomRDDs.normalVectorRDD
normalVectorRDD = RandomRDDs.normalVectorRDD(sc, numRows=numExamples, numCols=2)
print('Generated RDD of %d examples of length-2 vectors.' % normalVectorRDD.count())
print(' First 5 samples:')
for sample in normalVectorRDD.take(5):
print(' ' + str(sample))
print()
sc.stop()
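# Submission sketch (illustrative, standard for Spark example scripts):
#   bin/spark-submit examples/src/main/python/mllib/random_rdd_generation.py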
| apache-2.0 |
twitchyliquid64/misc-scripts | s3tool/boto/ec2/autoscale/launchconfig.py | 135 | 10807 | # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.elb.listelement import ListElement
# Namespacing issue with deprecated local class
from boto.ec2.blockdevicemapping import BlockDeviceMapping as BDM
from boto.resultset import ResultSet
import boto.utils
import base64
# this should use the corresponding object from boto.ec2
# Currently in use by deprecated local BlockDeviceMapping class
class Ebs(object):
def __init__(self, connection=None, snapshot_id=None, volume_size=None):
self.connection = connection
self.snapshot_id = snapshot_id
self.volume_size = volume_size
def __repr__(self):
return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'SnapshotId':
self.snapshot_id = value
elif name == 'VolumeSize':
self.volume_size = value
class InstanceMonitoring(object):
def __init__(self, connection=None, enabled='false'):
self.connection = connection
self.enabled = enabled
def __repr__(self):
return 'InstanceMonitoring(%s)' % self.enabled
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
self.enabled = value
# this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping
# Currently in use by deprecated code for backwards compatibility
# Removing this class can also remove the Ebs class in this same file
class BlockDeviceMapping(object):
def __init__(self, connection=None, device_name=None, virtual_name=None,
ebs=None, no_device=None):
self.connection = connection
self.device_name = device_name
self.virtual_name = virtual_name
self.ebs = ebs
self.no_device = no_device
def __repr__(self):
return 'BlockDeviceMapping(%s, %s)' % (self.device_name,
self.virtual_name)
def startElement(self, name, attrs, connection):
if name == 'Ebs':
self.ebs = Ebs(self)
return self.ebs
def endElement(self, name, value, connection):
if name == 'DeviceName':
self.device_name = value
elif name == 'VirtualName':
self.virtual_name = value
elif name == 'NoDevice':
self.no_device = bool(value)
class LaunchConfiguration(object):
def __init__(self, connection=None, name=None, image_id=None,
key_name=None, security_groups=None, user_data=None,
instance_type='m1.small', kernel_id=None,
ramdisk_id=None, block_device_mappings=None,
instance_monitoring=False, spot_price=None,
instance_profile_name=None, ebs_optimized=False,
associate_public_ip_address=None, volume_type=None,
delete_on_termination=True, iops=None,
use_block_device_types=False, classic_link_vpc_id=None,
classic_link_vpc_security_groups=None):
"""
A launch configuration.
:type name: str
:param name: Name of the launch configuration to create.
:type image_id: str
:param image_id: Unique ID of the Amazon Machine Image (AMI) which was
assigned during registration.
:type key_name: str
:param key_name: The name of the EC2 key pair.
:type security_groups: list
        :param security_groups: Names or security group IDs of the security
groups with which to associate the EC2 instances or VPC instances,
respectively.
:type user_data: str
:param user_data: The user data available to launched EC2 instances.
:type instance_type: str
:param instance_type: The instance type
:type kernel_id: str
:param kernel_id: Kernel id for instance
:type ramdisk_id: str
:param ramdisk_id: RAM disk id for instance
:type block_device_mappings: list
:param block_device_mappings: Specifies how block devices are exposed
for instances
:type instance_monitoring: bool
:param instance_monitoring: Whether instances in group are launched
with detailed monitoring.
:type spot_price: float
:param spot_price: The spot price you are bidding. Only applies
if you are building an autoscaling group with spot instances.
:type instance_profile_name: string
:param instance_profile_name: The name or the Amazon Resource
Name (ARN) of the instance profile associated with the IAM
role for the instance.
:type ebs_optimized: bool
:param ebs_optimized: Specifies whether the instance is optimized
for EBS I/O (true) or not (false).
:type associate_public_ip_address: bool
:param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
            Specifies whether to assign a public IP address to each instance launched in an Amazon VPC.
:type classic_link_vpc_id: str
:param classic_link_vpc_id: ID of ClassicLink enabled VPC.
:type classic_link_vpc_security_groups: list
:param classic_link_vpc_security_groups: Security group
            IDs of the security groups with which to associate the
ClassicLink VPC instances.
"""
self.connection = connection
self.name = name
self.instance_type = instance_type
self.block_device_mappings = block_device_mappings
self.key_name = key_name
sec_groups = security_groups or []
self.security_groups = ListElement(sec_groups)
self.image_id = image_id
self.ramdisk_id = ramdisk_id
self.created_time = None
self.kernel_id = kernel_id
self.user_data = user_data
self.created_time = None
self.instance_monitoring = instance_monitoring
self.spot_price = spot_price
self.instance_profile_name = instance_profile_name
self.launch_configuration_arn = None
self.ebs_optimized = ebs_optimized
self.associate_public_ip_address = associate_public_ip_address
self.volume_type = volume_type
self.delete_on_termination = delete_on_termination
self.iops = iops
self.use_block_device_types = use_block_device_types
self.classic_link_vpc_id = classic_link_vpc_id
classic_link_vpc_sec_groups = classic_link_vpc_security_groups or []
self.classic_link_vpc_security_groups = \
ListElement(classic_link_vpc_sec_groups)
if connection is not None:
self.use_block_device_types = connection.use_block_device_types
def __repr__(self):
return 'LaunchConfiguration:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'SecurityGroups':
return self.security_groups
elif name == 'ClassicLinkVPCSecurityGroups':
return self.classic_link_vpc_security_groups
elif name == 'BlockDeviceMappings':
if self.use_block_device_types:
self.block_device_mappings = BDM()
else:
self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)])
return self.block_device_mappings
elif name == 'InstanceMonitoring':
self.instance_monitoring = InstanceMonitoring(self)
return self.instance_monitoring
def endElement(self, name, value, connection):
if name == 'InstanceType':
self.instance_type = value
elif name == 'LaunchConfigurationName':
self.name = value
elif name == 'KeyName':
self.key_name = value
elif name == 'ImageId':
self.image_id = value
elif name == 'CreatedTime':
self.created_time = boto.utils.parse_ts(value)
elif name == 'KernelId':
self.kernel_id = value
elif name == 'RamdiskId':
self.ramdisk_id = value
elif name == 'UserData':
try:
self.user_data = base64.b64decode(value)
except TypeError:
self.user_data = value
elif name == 'LaunchConfigurationARN':
self.launch_configuration_arn = value
elif name == 'InstanceMonitoring':
self.instance_monitoring = value
elif name == 'SpotPrice':
self.spot_price = float(value)
elif name == 'IamInstanceProfile':
self.instance_profile_name = value
elif name == 'EbsOptimized':
self.ebs_optimized = True if value.lower() == 'true' else False
elif name == 'AssociatePublicIpAddress':
self.associate_public_ip_address = True if value.lower() == 'true' else False
elif name == 'VolumeType':
self.volume_type = value
elif name == 'DeleteOnTermination':
if value.lower() == 'true':
self.delete_on_termination = True
else:
self.delete_on_termination = False
elif name == 'Iops':
self.iops = int(value)
elif name == 'ClassicLinkVPCId':
self.classic_link_vpc_id = value
else:
setattr(self, name, value)
def delete(self):
""" Delete this launch configuration. """
return self.connection.delete_launch_configuration(self.name)
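# Minimal usage sketch (illustrative only): constructing a launch
# configuration and registering it through an AutoScaleConnection `conn`,
# e.g. one obtained from boto.ec2.autoscale.connect_to_region().
#
#   lc = LaunchConfiguration(name='web-lc', image_id='ami-12345678',
#                            key_name='my-key', instance_type='m1.small',
#                            security_groups=['web'])
#   conn.create_launch_configuration(lc)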
| mit |
wulin9005/cocosbuilder | CocosBuilder/libs/nodejs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
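# Illustrative behaviour of the quoting rules above (a sketch, not part of
# the upstream module): embedded double-quotes are doubled and the argument
# wrapped in quotes; arguments containing whitespace are simply wrapped.
#
#   _QuoteWin32CommandLineArgs(['say "hi"', 'two words', 'plain'])
#   => ['"say ""hi"""', '"two words"', 'plain']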
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
      environment: dictionary of environment variables to set. (optional)
      working_directory: directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
| mit |
drawks/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_vnc.py | 53 | 7341 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Armin Ranjbar Daemi <armin@webair.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_vnc
short_description: Manages VNC remote display on virtual machines in vCenter
description:
- This module can be used to enable and disable VNC remote display on virtual machine.
version_added: 2.8
author:
- Armin Ranjbar Daemi (@rmin)
requirements:
- python >= 2.6
- PyVmomi
options:
datacenter:
description:
- Destination datacenter for the deploy operation.
- This parameter is case sensitive.
default: ha-datacenter
state:
description:
- Set the state of VNC on virtual machine.
choices: [present, absent]
default: present
required: false
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
required: false
name_match:
description:
- If multiple virtual machines matching the name, use the first or last found.
default: first
choices: [first, last]
required: false
uuid:
description:
- UUID of the instance to manage if known, this is VMware's unique identifier.
- This is required, if C(name) is not supplied.
required: false
folder:
description:
- Destination folder, absolute or relative path to find an existing guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
required: false
vnc_ip:
description:
- Sets an IP for VNC on virtual machine.
- This is required only when I(state) is set to present and will be ignored if I(state) is absent.
default: 0.0.0.0
required: false
vnc_port:
description:
- The port that VNC listens on. Usually a number between 5900 and 7000 depending on your config.
- This is required only when I(state) is set to present and will be ignored if I(state) is absent.
default: 0
required: false
vnc_password:
description:
- Sets a password for VNC on virtual machine.
- This is required only when I(state) is set to present and will be ignored if I(state) is absent.
default: ""
required: false
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Enable VNC remote display on the VM
vmware_guest_vnc:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /mydatacenter/vm
name: testvm1
vnc_port: 5990
vnc_password: vNc5ecr3t
datacenter: "{{ datacenter_name }}"
state: present
delegate_to: localhost
register: vnc_result
- name: Disable VNC remote display on the VM
vmware_guest_vnc:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter_name }}"
uuid: 32074771-7d6b-699a-66a8-2d9cf8236fff
state: absent
delegate_to: localhost
register: vnc_result
'''
RETURN = '''
changed:
description: If anything changed on VM's extraConfig.
returned: always
type: bool
failed:
description: If changes failed.
returned: always
type: bool
instance:
description: Dictionary describing the VM, including VNC info.
  returned: on success for both values of I(state)
type: dict
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, get_vnc_extraconfig, wait_for_task, gather_vm_facts, TaskError
from ansible.module_utils._text import to_native
def set_vnc_extraconfig(content, vm, enabled, ip, port, password):
result = dict(
changed=False,
failed=False,
)
# set new values
key_prefix = "remotedisplay.vnc."
new_values = dict()
for key in ['enabled', 'ip', 'port', 'password']:
new_values[key_prefix + key] = ""
if enabled:
new_values[key_prefix + "enabled"] = "true"
new_values[key_prefix + "password"] = str(password).strip()
new_values[key_prefix + "ip"] = str(ip).strip()
new_values[key_prefix + "port"] = str(port).strip()
# get current vnc config
current_values = get_vnc_extraconfig(vm)
# check if any value is changed
reconfig_vm = False
for key, val in new_values.items():
key = key.replace(key_prefix, "")
current_value = current_values.get(key, "")
# enabled is not case-sensitive
if key == "enabled":
current_value = current_value.lower()
val = val.lower()
if current_value != val:
reconfig_vm = True
if not reconfig_vm:
return result
# reconfigure vm
spec = vim.vm.ConfigSpec()
spec.extraConfig = []
for key, val in new_values.items():
opt = vim.option.OptionValue()
opt.key = key
opt.value = val
spec.extraConfig.append(opt)
task = vm.ReconfigVM_Task(spec)
try:
wait_for_task(task)
except TaskError as task_err:
result['failed'] = True
result['msg'] = to_native(task_err)
if task.info.state == 'error':
result['failed'] = True
result['msg'] = task.info.error.msg
else:
result['changed'] = True
result['instance'] = gather_vm_facts(content, vm)
return result
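# For reference (values illustrative), enabling VNC through the code above
# writes extraConfig entries of the form:
#   remotedisplay.vnc.enabled  = "true"
#   remotedisplay.vnc.ip       = "0.0.0.0"
#   remotedisplay.vnc.port     = "5990"
#   remotedisplay.vnc.password = "vNc5ecr3t"
# Disabling resets all four keys to empty strings.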
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
state=dict(type='str', default='present', choices=['present', 'absent']),
name=dict(type='str'),
name_match=dict(type='str', choices=['first', 'last'], default='first'),
uuid=dict(type='str'),
folder=dict(type='str'),
vnc_ip=dict(type='str', default='0.0.0.0'),
vnc_port=dict(type='int', default=0),
vnc_password=dict(type='str', default='', no_log=True),
datacenter=dict(type='str', default='ha-datacenter')
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[
['name', 'uuid']
],
mutually_exclusive=[
['name', 'uuid']
]
)
result = dict(changed=False, failed=False)
pyv = PyVmomi(module)
vm = pyv.get_vm()
if vm:
result = set_vnc_extraconfig(
pyv.content,
vm,
(module.params['state'] == "present"),
module.params['vnc_ip'],
module.params['vnc_port'],
module.params['vnc_password']
)
else:
module.fail_json(msg="Unable to set VNC config for non-existing virtual machine : '%s'" % (module.params.get('uuid') or
module.params.get('name')))
if result.get('failed') is True:
module.fail_json(**result)
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
bearops/ebzl | ebzl/modules/ecs.py | 1 | 4547 | from .. lib import (
ecs,
format as fmt,
parameters
)
from . import (
version
)
import os
import json
import argparse
def get_argument_parser():
parser = argparse.ArgumentParser("ebzl ecs")
    parameters.add_profile(parser, required=False)
parameters.add_region(parser, required=False)
subparsers = parser.add_subparsers()
# ebzl ecs create
create_parser = subparsers.add_parser(
"create",
help="register a new task")
create_parser.set_defaults(func=create_task)
create_parser.add_argument("--family", required=True)
create_parser.add_argument("--name", required=True)
create_parser.add_argument("--image", required=True)
create_parser.add_argument("--version", default=version.get_version())
create_parser.add_argument("--command", default="")
create_parser.add_argument("--entrypoint", default=[])
create_parser.add_argument("--cpu", default=0)
create_parser.add_argument("--memory", default=250)
create_parser.add_argument("-v", "--var", action="append")
create_parser.add_argument("-f", "--var-file")
# ebzl ecs run
run_parser = subparsers.add_parser(
"run",
help="run registered task")
run_parser.set_defaults(func=run_task)
run_parser.add_argument("--task", required=True)
run_parser.add_argument("--cluster", default="default")
run_parser.add_argument("--command")
run_parser.add_argument("-v", "--var", action="append")
run_parser.add_argument("-f", "--var-file")
# ebzl ecs tasks
tasks_parser = subparsers.add_parser(
"tasks",
help="list available tasks")
tasks_parser.set_defaults(func=list_tasks)
# ebzl ecs clusters
clusters_parser = subparsers.add_parser(
"clusters",
help="list available clusters")
clusters_parser.set_defaults(func=list_clusters)
return parser
def parse_var_entry(var_entry):
parts = var_entry.strip().split("=")
return {"name": parts[0],
"value": "=".join(parts[1:])}
def parse_var_file(fpath):
if not fpath or not os.path.isfile(fpath):
return []
with open(os.path.expanduser(fpath), "rb") as f:
return map(parse_var_entry, f.readlines())
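# Parsing sketch (illustrative): only the first '=' separates name from
# value, so values may themselves contain '='.
#
#   parse_var_entry("FOO=bar=baz")
#   => {"name": "FOO", "value": "bar=baz"}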
def get_env_options(args):
env_options = []
env_options.extend(parse_var_file(args.var_file))
if args.var:
env_options.extend(map(parse_var_entry, args.var))
return env_options
def get_container_definition(args):
return {
"name": args.name,
"image": "%s:%s" % (args.image, args.version),
"mountPoints": [],
"volumesFrom": [],
"portMappings": [],
"command": map(str.strip, args.command.split()),
"essential": True,
"entryPoint": args.entrypoint,
"links": [],
"cpu": int(args.cpu),
"memory": int(args.memory),
"environment": get_env_options(args)
}
def create_task(args):
conn = ecs.get_conn(profile=args.profile)
conn.register_task_definition(
family="AtlasCron",
containerDefinitions=[get_container_definition(args)])
def run_task(args):
conn = ecs.get_conn(profile=args.profile)
kwargs = {
"cluster": args.cluster,
"taskDefinition": args.task,
"count": 1,
}
if args.command or args.var or args.var_file:
overrides = {}
task = conn.describe_task_definition(taskDefinition=args.task)
overrides["name"] = (task["taskDefinition"]
["containerDefinitions"]
[0]
["name"])
if args.command:
overrides["command"] = map(str.strip, args.command.split())
env_options = get_env_options(args)
if env_options:
overrides["environment"] = env_options
kwargs["overrides"] = {"containerOverrides": [overrides]}
print conn.run_task(**kwargs)
def list_tasks(args):
conn = ecs.get_conn(profile=args.profile)
tasks = conn.list_task_definitions()
fmt.print_list([arn.split("/")[-1]
for arn in tasks["taskDefinitionArns"]])
def list_clusters(args):
conn = ecs.get_conn(profile=args.profile)
clusters = conn.list_clusters()
fmt.print_list([arn.split("/")[-1]
for arn in clusters["clusterArns"]])
def run(argv):
args = parameters.parse(
parser=get_argument_parser(),
argv=argv,
postprocessors=[parameters.add_default_region])
args.func(args)
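# Invocation sketch (flag names taken from the parsers above; values are
# illustrative):
#   ebzl ecs create --family myfam --name web --image repo/web --memory 512
#   ebzl ecs run --task myfam --cluster default -v KEY=value
#   ebzl ecs tasks
#   ebzl ecs clusters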
| bsd-3-clause |
egiurleo/appinventor-sources | appinventor/misc/emulator-support/aiWinDaemonService.py | 86 | 1932 | __author__ = 'mckinney'
import win32service
import win32serviceutil
import win32api
import win32con
import win32event
import win32evtlogutil
import os, sys, platform, string, time
import servicemanager
class aservice(win32serviceutil.ServiceFramework):
_svc_name_ = "aiDaemonService"
_svc_display_name_ = "AI connection daemon service"
_svc_description_ = "AI Daemon to handle USB and Emulator connections for App Inventor"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
PLATDIR = os.environ["ProgramFiles"]
PLATDIR = '"' + PLATDIR + '"'
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,servicemanager.PYS_SERVICE_STARTED,(self._svc_name_, ''))
        self.timeout = 1000  # milliseconds, i.e. 1 second
# This is how long the service will wait to run / refresh itself (see script below)
while 1:
# Wait for service stop signal, if I timeout, loop again
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
# Check to see if self.hWaitStop happened
if rc == win32event.WAIT_OBJECT_0:
# Stop signal encountered
servicemanager.LogInfoMsg("aiWinSrvDaemon - STOPPED!") #For Event Log
break
else:
#what to run
try:
file_path = PLATDIR + "\\AppInventor\\aiWinDaemon.exe"
os.system(file_path)
except:
pass
def ctrlHandler(ctrlType):
return True
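# Service registration sketch (standard win32serviceutil behaviour, shown for
# context): HandleCommandLine below lets the script be installed and driven
# from an elevated prompt, e.g.
#   python aiWinDaemonService.py install
#   python aiWinDaemonService.py start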
if __name__ == '__main__':
win32api.SetConsoleCtrlHandler(ctrlHandler, True)
win32serviceutil.HandleCommandLine(aservice) | apache-2.0 |
rosswhitfield/mantid | qt/python/mantidqt/widgets/sliceviewer/test/test_sliceviewer_presenter.py | 3 | 24997 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
import sys
import unittest
from unittest import mock
from unittest.mock import patch
from mantid.api import MultipleExperimentInfos
import matplotlib
matplotlib.use('Agg')
# Mock out simpleapi to import expensive import of something we don't use anyway
sys.modules['mantid.simpleapi'] = mock.MagicMock()
from mantidqt.widgets.sliceviewer.model import SliceViewerModel, WS_TYPE # noqa: E402
from mantidqt.widgets.sliceviewer.presenter import ( # noqa: E402
PeaksViewerCollectionPresenter, SliceViewer)
from mantidqt.widgets.sliceviewer.transform import NonOrthogonalTransform # noqa: E402
from mantidqt.widgets.sliceviewer.toolbar import ToolItemText # noqa: E402
from mantidqt.widgets.sliceviewer.view import SliceViewerView, SliceViewerDataView # noqa: E402
def _create_presenter(model, view, mock_sliceinfo_cls, enable_nonortho_axes, supports_nonortho):
model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDH)
model.is_ragged_matrix_plotted.return_value = False
model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
data_view_mock = view.data_view
data_view_mock.plot_MDH = mock.Mock()
presenter = SliceViewer(None, model=model, view=view)
if enable_nonortho_axes:
data_view_mock.nonorthogonal_mode = True
data_view_mock.nonortho_transform = mock.MagicMock(NonOrthogonalTransform)
data_view_mock.nonortho_transform.tr.return_value = (0, 1)
presenter.nonorthogonal_axes(True)
else:
data_view_mock.nonorthogonal_mode = False
data_view_mock.nonortho_transform = None
data_view_mock.disable_tool_button.reset_mock()
data_view_mock.create_axes_orthogonal.reset_mock()
data_view_mock.create_axes_nonorthogonal.reset_mock()
mock_sliceinfo_instance = mock_sliceinfo_cls.return_value
mock_sliceinfo_instance.can_support_nonorthogonal_axes.return_value = supports_nonortho
return presenter, data_view_mock
def create_workspace_mock():
# Mock out workspace methods needed for SliceViewerModel.__init__
workspace = mock.Mock(spec=MultipleExperimentInfos)
workspace.isMDHistoWorkspace = lambda: False
workspace.getNumDims = lambda: 2
workspace.name = lambda: "workspace"
return workspace
class SliceViewerTest(unittest.TestCase):
def setUp(self):
self.view = mock.Mock(spec=SliceViewerView)
data_view = mock.Mock(spec=SliceViewerDataView)
data_view.plot_MDH = mock.Mock()
data_view.dimensions = mock.Mock()
data_view.norm_opts = mock.Mock()
data_view.image_info_widget = mock.Mock()
data_view.canvas = mock.Mock()
data_view.nonorthogonal_mode = False
data_view.nonortho_transform = None
data_view.get_axes_limits.return_value = None
dimensions = mock.Mock()
dimensions.get_slicepoint.return_value = [None, None, 0.5]
dimensions.transpose = False
dimensions.get_slicerange.return_value = [None, None, (-15, 15)]
dimensions.qflags = [True, True, True]
data_view.dimensions = dimensions
self.view.data_view = data_view
self.model = mock.Mock(spec=SliceViewerModel)
self.model.get_ws = mock.Mock()
self.model.get_data = mock.Mock()
self.model.rebin = mock.Mock()
self.model.workspace_equals = mock.Mock()
self.model.get_properties.return_value = {
"workspace_type": "WS_TYPE.MATRIX",
"supports_normalise": True,
"supports_nonorthogonal_axes": False,
"supports_dynamic_rebinning": False,
"supports_peaks_overlays": True
}
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_MDH(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDH)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# update_plot_data
self.model.reset_mock()
self.view.reset_mock()
presenter.update_plot_data()
self.assertEqual(self.model.get_data.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.update_plot_data.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_MDE(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws_MDE.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws_MDE.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# update_plot_data
self.model.reset_mock()
self.view.reset_mock()
presenter.update_plot_data()
self.assertEqual(self.model.get_data.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.update_plot_data.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_matrix(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 0)
self.assertEqual(self.view.data_view.plot_matrix.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 0)
self.assertEqual(self.view.data_view.plot_matrix.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_normalization_change_set_correct_normalization(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.view.data_view.plot_matrix = mock.Mock()
presenter = SliceViewer(None, model=self.model, view=self.view)
presenter.normalization_changed("By bin width")
self.view.data_view.plot_matrix.assert_called_with(self.model.get_ws(), distribution=False)
    @patch("sip.isdeleted", return_value=False)
    def test_peaks_button_disabled_if_model_cannot_support_it(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.model.can_support_peaks_overlay.return_value = False
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.disable_tool_button.assert_called_once_with(ToolItemText.OVERLAY_PEAKS)
    @patch("sip.isdeleted", return_value=False)
    def test_peaks_button_not_disabled_if_model_can_support_it(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.model.can_support_peaks_overlay.return_value = True
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.disable_tool_button.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_non_orthogonal_axes_toggled_on(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
self.model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
self.model.is_ragged_matrix_plotted.return_value = False
data_view_mock = self.view.data_view
data_view_mock.plot_MDH = mock.Mock()
presenter = SliceViewer(None, model=self.model, view=self.view)
data_view_mock.plot_MDH.reset_mock() # clear initial plot call
data_view_mock.create_axes_orthogonal.reset_mock()
presenter.nonorthogonal_axes(True)
data_view_mock.deactivate_and_disable_tool.assert_called_once_with(
ToolItemText.REGIONSELECTION)
data_view_mock.create_axes_nonorthogonal.assert_called_once()
data_view_mock.create_axes_orthogonal.assert_not_called()
self.assertEqual(data_view_mock.plot_MDH.call_count, 2)
data_view_mock.disable_tool_button.assert_has_calls([mock.call(ToolItemText.LINEPLOTS)])
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_non_orthogonal_axes_toggled_off(self, mock_sliceinfo_cls, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=True)
data_view_mock.plot_MDH.reset_mock() # clear initial plot call
data_view_mock.create_axes_orthogonal.reset_mock()
data_view_mock.create_axes_nonorthogonal.reset_mock()
data_view_mock.enable_tool_button.reset_mock()
data_view_mock.disable_tool_button.reset_mock()
data_view_mock.remove_line_plots.reset_mock()
presenter.nonorthogonal_axes(False)
data_view_mock.create_axes_orthogonal.assert_called_once()
data_view_mock.create_axes_nonorthogonal.assert_not_called()
data_view_mock.plot_MDH.assert_called_once()
data_view_mock.enable_tool_button.assert_has_calls(
(mock.call(ToolItemText.LINEPLOTS), mock.call(ToolItemText.REGIONSELECTION)))
@patch("sip.isdeleted", return_value=False)
def test_request_to_show_all_data_sets_correct_limits_on_view_MD(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.is_ragged_matrix_plotted.return_value = False
self.model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
presenter.show_all_data_requested()
data_view = self.view.data_view
self.model.get_dim_limits.assert_called_once_with([None, None, 0.5],
data_view.dimensions.transpose)
data_view.get_full_extent.assert_not_called()
data_view.set_axes_limits.assert_called_once_with((-1, 1), (-2, 2))
@patch("sip.isdeleted", return_value=False)
def test_request_to_show_all_data_sets_correct_limits_on_view_ragged_matrix(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.is_ragged_matrix_plotted.return_value = True
self.view.data_view.get_full_extent.return_value = [-1, 1, -2, 2]
presenter.show_all_data_requested()
data_view = self.view.data_view
self.model.get_dim_limits.assert_not_called()
data_view.set_axes_limits.assert_called_once_with((-1, 1), (-2, 2))
@patch("sip.isdeleted", return_value=False)
def test_data_limits_changed_creates_new_plot_if_dynamic_rebinning_supported(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.can_support_dynamic_rebinning.return_value = True
new_plot_mock = mock.MagicMock()
presenter.new_plot = new_plot_mock
presenter.data_limits_changed()
new_plot_mock.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_data_limits_changed_does_not_create_new_plot_if_dynamic_rebinning_not_supported(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.can_support_dynamic_rebinning.return_value = False
new_plot_mock = mock.MagicMock()
presenter.new_plot = new_plot_mock
presenter.data_limits_changed()
new_plot_mock.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_nonortho_mode_switches_to_ortho_when_dim_not_Q(
self, mock_sliceinfo_cls, is_view_delete):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=False)
presenter.dimensions_changed()
data_view_mock.disable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
data_view_mock.create_axes_orthogonal.assert_called_once()
data_view_mock.create_axes_nonorthogonal.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_nonortho_mode_keeps_nonortho_when_dim_is_Q(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=True)
presenter.dimensions_changed()
data_view_mock.create_axes_nonorthogonal.assert_called_once()
data_view_mock.disable_tool_button.assert_not_called()
data_view_mock.create_axes_orthogonal.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_ortho_mode_disables_nonortho_btn_if_not_supported(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.dimensions_changed()
data_view_mock.disable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_ortho_mode_enables_nonortho_btn_if_supported(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=True)
presenter.dimensions_changed()
data_view_mock.enable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.peaksviewer.presenter.TableWorkspaceDataPresenterStandard")
@mock.patch("mantidqt.widgets.sliceviewer.presenter.PeaksViewerCollectionPresenter",
spec=PeaksViewerCollectionPresenter)
def test_overlay_peaks_workspaces_attaches_view_and_draws_peaks(self, mock_peaks_presenter, *_):
for nonortho_axes in (False, True):
presenter, _ = _create_presenter(self.model, self.view, mock.MagicMock(), nonortho_axes,
nonortho_axes)
presenter.view.query_peaks_to_overlay.side_effect = ["peaks_workspace"]
presenter.overlay_peaks_workspaces()
presenter.view.query_peaks_to_overlay.assert_called_once()
mock_peaks_presenter.assert_called_once()
            mock_peaks_presenter.return_value.overlay_peaksworkspaces.assert_called_once()
mock_peaks_presenter.reset_mock()
presenter.view.query_peaks_to_overlay.reset_mock()
@patch("sip.isdeleted", return_value=False)
def test_gui_starts_with_zoom_selected(self, _):
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.activate_tool.assert_called_once_with(ToolItemText.ZOOM)
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_returns_when_the_workspace_is_not_the_model_workspace(self, _):
self.model.workspace_equals.return_value = False
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.update_view = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock()
other_workspace = mock.Mock()
presenter.replace_workspace('other_workspace', other_workspace)
presenter._decide_plot_update_methods.assert_not_called()
presenter.update_view.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_closes_view_when_model_properties_change(self, _):
self.model.workspace_equals.return_value = True
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.refresh_view = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock()
workspace = create_workspace_mock()
# Not equivalent to self.model.get_properties()
new_model_properties = {
"workspace_type": "WS_TYPE.MDE",
"supports_normalise": False,
"supports_nonorthogonal_axes": False,
"supports_dynamic_rebinning": False,
"supports_peaks_overlays": True
}
with patch.object(SliceViewerModel, "get_properties", return_value=new_model_properties):
presenter.replace_workspace('workspace', workspace)
self.view.emit_close.assert_called_once()
presenter._decide_plot_update_methods.assert_not_called()
presenter.refresh_view.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_updates_view(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
self.view.delayed_refresh = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock(
return_value=(presenter.new_plot_matrix(), presenter.update_plot_data_matrix()))
workspace = create_workspace_mock()
new_model_properties = self.model.get_properties()
# Patch get_properties so that the properties of the new model match those of self.model
with patch.object(SliceViewerModel, "get_properties", return_value=new_model_properties):
presenter.replace_workspace('workspace', workspace)
self.view.emit_close.assert_not_called()
presenter._decide_plot_update_methods.assert_called_once()
self.view.delayed_refresh.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_refresh_view(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.new_plot = mock.Mock()
presenter.refresh_view()
self.view.data_view.image_info_widget.setWorkspace.assert_called()
self.view.setWindowTitle.assert_called_with(self.model.get_title())
presenter.new_plot.assert_called_once()
@patch("sip.isdeleted", return_value=True)
def test_refresh_view_does_nothing_when_view_deleted(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.new_plot = mock.Mock()
presenter.refresh_view()
self.view.data_view.image_info_widget.setWorkspace.assert_not_called()
presenter.new_plot.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_clear_observer_peaks_presenter_not_none(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter._peaks_presenter = mock.MagicMock()
presenter.clear_observer()
presenter._peaks_presenter.clear_observer.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_clear_observer_peaks_presenter_is_none(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter._peaks_presenter = None
# Will raise exception if misbehaving.
presenter.clear_observer()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
@mock.patch("mantidqt.widgets.sliceviewer.presenter.PeaksViewerCollectionPresenter",
spec=PeaksViewerCollectionPresenter)
def test_peak_add_delete_event(self, mock_peaks_presenter, mock_sliceinfo_cls, _):
mock_sliceinfo_cls().inverse_transform = mock.Mock(side_effect=lambda pos: pos[::-1])
mock_sliceinfo_cls().z_value = 3
presenter, _ = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=True)
presenter._peaks_presenter = mock_peaks_presenter
event = mock.Mock()
event.inaxes = True
event.xdata = 1.0
event.ydata = 2.0
presenter.add_delete_peak(event)
mock_sliceinfo_cls.get_sliceinfo.assert_not_called()
mock_peaks_presenter.add_delete_peak.assert_called_once_with([3, 2, 1])
self.view.data_view.canvas.draw_idle.assert_called_once()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
SergeyPirogov/ghost | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/math.py | 291 | 93298 | # -*- coding: utf-8 -*-
"""
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Lexers for math languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.util import shebang_matches
from pygments.lexer import Lexer, RegexLexer, bygroups, include, \
combined, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
Operator, Number, Text, Generic
from pygments.lexers.agile import PythonLexer
from pygments.lexers import _scilab_builtins
from pygments.lexers import _stan_builtins
__all__ = ['JuliaLexer', 'JuliaConsoleLexer', 'MuPADLexer', 'MatlabLexer',
'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer', 'NumPyLexer',
'RConsoleLexer', 'SLexer', 'JagsLexer', 'BugsLexer', 'StanLexer',
'IDLLexer', 'RdLexer', 'IgorLexer']
class JuliaLexer(RegexLexer):
"""
For `Julia <http://julialang.org/>`_ source code.
*New in Pygments 1.6.*
"""
name = 'Julia'
aliases = ['julia','jl']
filenames = ['*.jl']
mimetypes = ['text/x-julia','application/x-julia']
builtins = [
'exit','whos','edit','load','is','isa','isequal','typeof','tuple',
'ntuple','uid','hash','finalizer','convert','promote','subtype',
'typemin','typemax','realmin','realmax','sizeof','eps','promote_type',
'method_exists','applicable','invoke','dlopen','dlsym','system',
'error','throw','assert','new','Inf','Nan','pi','im',
]
tokens = {
'root': [
(r'\n', Text),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[@]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
# keywords
(r'(begin|while|for|in|return|break|continue|'
r'macro|quote|let|if|elseif|else|try|catch|end|'
r'bitstype|ccall|do|using|module|import|export|'
r'importall|baremodule|immutable)\b', Keyword),
(r'(local|global|const)\b', Keyword.Declaration),
(r'(Bool|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64'
r'|Float32|Float64|Complex64|Complex128|Any|Nothing|None)\b',
Keyword.Type),
# functions
(r'(function)((?:\s|\\\s)+)',
bygroups(Keyword,Name.Function), 'funcname'),
# types
(r'(type|typealias|abstract)((?:\s|\\\s)+)',
bygroups(Keyword,Name.Class), 'typename'),
# operators
(r'==|!=|<=|>=|->|&&|\|\||::|<:|[-~+/*%=<>&^|.?!$]', Operator),
(r'\.\*|\.\^|\.\\|\.\/|\\', Operator),
# builtins
('(' + '|'.join(builtins) + r')\b', Name.Builtin),
# backticks
(r'`(?s).*?`', String.Backtick),
# chars
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|"
r"\\U[a-fA-F0-9]{1,6}|[^\\\'\n])'", String.Char),
# try to match trailing transpose
(r'(?<=[.\w\)\]])\'+', Operator),
# strings
(r'(?:[IL])"', String, 'string'),
(r'[E]?"', String, combined('stringescape', 'string')),
# names
(r'@[a-zA-Z0-9_.]+', Name.Decorator),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
# numbers
(r'(\d+(_\d+)+\.\d*|\d*\.\d+(_\d+)+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+(_\d+)+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'0b[01]+(_[01]+)+', Number.Binary),
(r'0b[01]+', Number.Binary),
(r'0o[0-7]+(_[0-7]+)+', Number.Oct),
(r'0o[0-7]+', Number.Oct),
(r'0x[a-fA-F0-9]+(_[a-fA-F0-9]+)+', Number.Hex),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'\d+(_\d+)+', Number.Integer),
(r'\d+', Number.Integer)
],
'funcname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop'),
('\([^\s\w{]{1,2}\)', Operator, '#pop'),
('[^\s\w{]{1,2}', Operator, '#pop'),
],
'typename': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'string': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
(r'\$(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?',
String.Interpol),
(r'[^\\"$]+', String),
# quotes, dollar signs, and backslashes must be parsed one at a time
(r'["\\]', String),
# unhandled string formatting sign
(r'\$', String)
],
}
def analyse_text(text):
return shebang_matches(text, r'julia')
line_re = re.compile('.*?\n')
class JuliaConsoleLexer(Lexer):
"""
For Julia console sessions. Modeled after MatlabSessionLexer.
*New in Pygments 1.6.*
"""
name = 'Julia console'
aliases = ['jlcon']
def get_tokens_unprocessed(self, text):
jllexer = JuliaLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
            if line.startswith('julia>'):
                # the 'julia> ' prompt is 7 characters, trailing space included
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:7])]))
                curcode += line[7:]
elif line.startswith(' '):
idx = len(curcode)
                # prepend a newline, otherwise the error shows on the same
                # line as the preceding output
line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append((idx, [token]))
else:
if curcode:
for item in do_insertions(
insertions, jllexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode: # or item:
for item in do_insertions(
insertions, jllexer.get_tokens_unprocessed(curcode)):
yield item
class MuPADLexer(RegexLexer):
"""
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <christopher@creutzig.de>.
*New in Pygments 0.8.*
"""
name = 'MuPAD'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root' : [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
#(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)(\s*)([(])''',
bygroups(Name.Function, Text, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
'comment' : [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
]
}
class MatlabLexer(RegexLexer):
"""
For Matlab source code.
*New in Pygments 0.10.*
"""
name = 'Matlab'
aliases = ['matlab']
filenames = ['*.m']
mimetypes = ['text/matlab']
#
# These lists are generated automatically.
# Run the following in bash shell:
#
# for f in elfun specfun elmat; do
# echo -n "$f = "
# matlab -nojvm -r "help $f;exit;" | perl -ne \
# 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
# done
#
# elfun: Elementary math functions
# specfun: Special Math functions
# elmat: Elementary matrices and matrix manipulation
#
# taken from Matlab version 7.4.0.336 (R2007a)
#
elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh",
"acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2",
"atanh","sec","secd","sech","asec","asecd","asech","csc","cscd",
"csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd",
"acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2",
"realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs",
"angle","complex","conj","imag","real","unwrap","isreal","cplxpair",
"fix","floor","ceil","round","mod","rem","sign"]
specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta",
"betainc","betaln","ellipj","ellipke","erf","erfc","erfcx",
"erfinv","expint","gamma","gammainc","gammaln","psi","legendre",
"cross","dot","factor","isprime","primes","gcd","lcm","rat",
"rats","perms","nchoosek","factorial","cart2sph","cart2pol",
"pol2cart","sph2cart","hsv2rgb","rgb2hsv"]
elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace",
"freqspace","meshgrid","accumarray","size","length","ndims","numel",
"disp","isempty","isequal","isequalwithequalnans","cat","reshape",
"diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90",
"find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute",
"ipermute","shiftdim","circshift","squeeze","isscalar","isvector",
"ans","eps","realmax","realmin","pi","i","inf","nan","isnan",
"isinf","isfinite","j","why","compan","gallery","hadamard","hankel",
"hilb","invhilb","magic","pascal","rosser","toeplitz","vander",
"wilkinson"]
tokens = {
'root': [
# line starting with '!' is sent as a system command. not sure what
# label to use...
(r'^!.*', String.Other),
(r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
(r'%.*$', Comment),
(r'^\s*function', Keyword, 'deffunc'),
# from 'iskeyword' on version 7.11 (R2010):
(r'(break|case|catch|classdef|continue|else|elseif|end|enumerated|'
r'events|for|function|global|if|methods|otherwise|parfor|'
r'persistent|properties|return|spmd|switch|try|while)\b', Keyword),
("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin),
# line continuation with following comment:
(r'\.\.\..*$', Comment),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w\)\]])\'', Operator),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
(r'(?<![\w\)\]])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r'[^\']*\'', String, '#pop')
],
'blockcomment': [
(r'^\s*%\}', Comment.Multiline, '#pop'),
(r'^.*\n', Comment.Multiline),
(r'.', Comment.Multiline),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
def analyse_text(text):
if re.match('^\s*%', text, re.M): # comment
return 0.9
elif re.match('^!\w+', text, re.M): # system cmd
return 0.9
return 0.1
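# Usage sketch (not part of this module): lexers defined here plug into the
# standard Pygments pipeline, e.g.
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print highlight("x = magic(3);", MatlabLexer(), HtmlFormatter())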
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
"""
For Matlab sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
*New in Pygments 0.10.*
"""
name = 'Matlab session'
aliases = ['matlabsession']
def get_tokens_unprocessed(self, text):
mlexer = MatlabLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]))
curcode += line[3:]
elif line.startswith('???'):
idx = len(curcode)
                # prepend a newline, otherwise the error shows on the same
                # line as the preceding output
line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append((idx, [token]))
else:
if curcode:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
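# Illustrative only (assumed transcript, not from the original file):
# MatlabSessionLexer tags '>> ' prefixes as Generic.Prompt, '???' lines as
# Generic.Traceback and everything else as Generic.Output, e.g.:
#
#     >> x = 1:3;
#     >> x(5)
#     ??? Index exceeds matrix dimensions.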
class OctaveLexer(RegexLexer):
"""
For GNU Octave source code.
*New in Pygments 1.5.*
"""
name = 'Octave'
aliases = ['octave']
filenames = ['*.m']
mimetypes = ['text/octave']
# These lists are generated automatically.
# Run the following in bash shell:
#
# First dump all of the Octave manual into a plain text file:
#
# $ info octave --subnodes -o octave-manual
#
# Now grep through it:
# for i in \
# "Built-in Function" "Command" "Function File" \
# "Loadable Function" "Mapping Function";
# do
# perl -e '@name = qw('"$i"');
# print lc($name[0]),"_kw = [\n"';
#
# perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \
# octave-manual | sort | uniq ;
# echo "]" ;
# echo;
# done
# taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)
builtin_kw = [ "addlistener", "addpath", "addproperty", "all",
"and", "any", "argnames", "argv", "assignin",
"atexit", "autoload",
"available_graphics_toolkits", "beep_on_error",
"bitand", "bitmax", "bitor", "bitshift", "bitxor",
"cat", "cell", "cellstr", "char", "class", "clc",
"columns", "command_line_path",
"completion_append_char", "completion_matches",
"complex", "confirm_recursive_rmdir", "cputime",
"crash_dumps_octave_core", "ctranspose", "cumprod",
"cumsum", "debug_on_error", "debug_on_interrupt",
"debug_on_warning", "default_save_options",
"dellistener", "diag", "diff", "disp",
"doc_cache_file", "do_string_escapes", "double",
"drawnow", "e", "echo_executing_commands", "eps",
"eq", "errno", "errno_list", "error", "eval",
"evalin", "exec", "exist", "exit", "eye", "false",
"fclear", "fclose", "fcntl", "fdisp", "feof",
"ferror", "feval", "fflush", "fgetl", "fgets",
"fieldnames", "file_in_loadpath", "file_in_path",
"filemarker", "filesep", "find_dir_in_path",
"fixed_point_format", "fnmatch", "fopen", "fork",
"formula", "fprintf", "fputs", "fread", "freport",
"frewind", "fscanf", "fseek", "fskipl", "ftell",
"functions", "fwrite", "ge", "genpath", "get",
"getegid", "getenv", "geteuid", "getgid",
"getpgrp", "getpid", "getppid", "getuid", "glob",
"gt", "gui_mode", "history_control",
"history_file", "history_size",
"history_timestamp_format_string", "home",
"horzcat", "hypot", "ifelse",
"ignore_function_time_stamp", "inferiorto",
"info_file", "info_program", "inline", "input",
"intmax", "intmin", "ipermute",
"is_absolute_filename", "isargout", "isbool",
"iscell", "iscellstr", "ischar", "iscomplex",
"isempty", "isfield", "isfloat", "isglobal",
"ishandle", "isieee", "isindex", "isinteger",
"islogical", "ismatrix", "ismethod", "isnull",
"isnumeric", "isobject", "isreal",
"is_rooted_relative_filename", "issorted",
"isstruct", "isvarname", "kbhit", "keyboard",
"kill", "lasterr", "lasterror", "lastwarn",
"ldivide", "le", "length", "link", "linspace",
"logical", "lstat", "lt", "make_absolute_filename",
"makeinfo_program", "max_recursion_depth", "merge",
"methods", "mfilename", "minus", "mislocked",
"mkdir", "mkfifo", "mkstemp", "mldivide", "mlock",
"mouse_wheel_zoom", "mpower", "mrdivide", "mtimes",
"munlock", "nargin", "nargout",
"native_float_format", "ndims", "ne", "nfields",
"nnz", "norm", "not", "numel", "nzmax",
"octave_config_info", "octave_core_file_limit",
"octave_core_file_name",
"octave_core_file_options", "ones", "or",
"output_max_field_width", "output_precision",
"page_output_immediately", "page_screen_output",
"path", "pathsep", "pause", "pclose", "permute",
"pi", "pipe", "plus", "popen", "power",
"print_empty_dimensions", "printf",
"print_struct_array_contents", "prod",
"program_invocation_name", "program_name",
"putenv", "puts", "pwd", "quit", "rats", "rdivide",
"readdir", "readlink", "read_readline_init_file",
"realmax", "realmin", "rehash", "rename",
"repelems", "re_read_readline_init_file", "reset",
"reshape", "resize", "restoredefaultpath",
"rethrow", "rmdir", "rmfield", "rmpath", "rows",
"save_header_format_string", "save_precision",
"saving_history", "scanf", "set", "setenv",
"shell_cmd", "sighup_dumps_octave_core",
"sigterm_dumps_octave_core", "silent_functions",
"single", "size", "size_equal", "sizemax",
"sizeof", "sleep", "source", "sparse_auto_mutate",
"split_long_rows", "sprintf", "squeeze", "sscanf",
"stat", "stderr", "stdin", "stdout", "strcmp",
"strcmpi", "string_fill_char", "strncmp",
"strncmpi", "struct", "struct_levels_to_print",
"strvcat", "subsasgn", "subsref", "sum", "sumsq",
"superiorto", "suppress_verbose_help_message",
"symlink", "system", "tic", "tilde_expand",
"times", "tmpfile", "tmpnam", "toc", "toupper",
"transpose", "true", "typeinfo", "umask", "uminus",
"uname", "undo_string_escapes", "unlink", "uplus",
"upper", "usage", "usleep", "vec", "vectorize",
"vertcat", "waitpid", "warning", "warranty",
"whos_line_format", "yes_or_no", "zeros",
"inf", "Inf", "nan", "NaN"]
command_kw = [ "close", "load", "who", "whos", ]
function_kw = [ "accumarray", "accumdim", "acosd", "acotd",
"acscd", "addtodate", "allchild", "ancestor",
"anova", "arch_fit", "arch_rnd", "arch_test",
"area", "arma_rnd", "arrayfun", "ascii", "asctime",
"asecd", "asind", "assert", "atand",
"autoreg_matrix", "autumn", "axes", "axis", "bar",
"barh", "bartlett", "bartlett_test", "beep",
"betacdf", "betainv", "betapdf", "betarnd",
"bicgstab", "bicubic", "binary", "binocdf",
"binoinv", "binopdf", "binornd", "bitcmp",
"bitget", "bitset", "blackman", "blanks",
"blkdiag", "bone", "box", "brighten", "calendar",
"cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf",
"cauchy_rnd", "caxis", "celldisp", "center", "cgs",
"chisquare_test_homogeneity",
"chisquare_test_independence", "circshift", "cla",
"clabel", "clf", "clock", "cloglog", "closereq",
"colon", "colorbar", "colormap", "colperm",
"comet", "common_size", "commutation_matrix",
"compan", "compare_versions", "compass",
"computer", "cond", "condest", "contour",
"contourc", "contourf", "contrast", "conv",
"convhull", "cool", "copper", "copyfile", "cor",
"corrcoef", "cor_test", "cosd", "cotd", "cov",
"cplxpair", "cross", "cscd", "cstrcat", "csvread",
"csvwrite", "ctime", "cumtrapz", "curl", "cut",
"cylinder", "date", "datenum", "datestr",
"datetick", "datevec", "dblquad", "deal",
"deblank", "deconv", "delaunay", "delaunayn",
"delete", "demo", "detrend", "diffpara", "diffuse",
"dir", "discrete_cdf", "discrete_inv",
"discrete_pdf", "discrete_rnd", "display",
"divergence", "dlmwrite", "dos", "dsearch",
"dsearchn", "duplication_matrix", "durbinlevinson",
"ellipsoid", "empirical_cdf", "empirical_inv",
"empirical_pdf", "empirical_rnd", "eomday",
"errorbar", "etime", "etreeplot", "example",
"expcdf", "expinv", "expm", "exppdf", "exprnd",
"ezcontour", "ezcontourf", "ezmesh", "ezmeshc",
"ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor",
"factorial", "fail", "fcdf", "feather", "fftconv",
"fftfilt", "fftshift", "figure", "fileattrib",
"fileparts", "fill", "findall", "findobj",
"findstr", "finv", "flag", "flipdim", "fliplr",
"flipud", "fpdf", "fplot", "fractdiff", "freqz",
"freqz_plot", "frnd", "fsolve",
"f_test_regression", "ftp", "fullfile", "fzero",
"gamcdf", "gaminv", "gampdf", "gamrnd", "gca",
"gcbf", "gcbo", "gcf", "genvarname", "geocdf",
"geoinv", "geopdf", "geornd", "getfield", "ginput",
"glpk", "gls", "gplot", "gradient",
"graphics_toolkit", "gray", "grid", "griddata",
"griddatan", "gtext", "gunzip", "gzip", "hadamard",
"hamming", "hankel", "hanning", "hggroup",
"hidden", "hilb", "hist", "histc", "hold", "hot",
"hotelling_test", "housh", "hsv", "hurst",
"hygecdf", "hygeinv", "hygepdf", "hygernd",
"idivide", "ifftshift", "image", "imagesc",
"imfinfo", "imread", "imshow", "imwrite", "index",
"info", "inpolygon", "inputname", "interpft",
"interpn", "intersect", "invhilb", "iqr", "isa",
"isdefinite", "isdir", "is_duplicate_entry",
"isequal", "isequalwithequalnans", "isfigure",
"ishermitian", "ishghandle", "is_leap_year",
"isletter", "ismac", "ismember", "ispc", "isprime",
"isprop", "isscalar", "issquare", "isstrprop",
"issymmetric", "isunix", "is_valid_file_id",
"isvector", "jet", "kendall",
"kolmogorov_smirnov_cdf",
"kolmogorov_smirnov_test", "kruskal_wallis_test",
"krylov", "kurtosis", "laplace_cdf", "laplace_inv",
"laplace_pdf", "laplace_rnd", "legend", "legendre",
"license", "line", "linkprop", "list_primes",
"loadaudio", "loadobj", "logistic_cdf",
"logistic_inv", "logistic_pdf", "logistic_rnd",
"logit", "loglog", "loglogerr", "logm", "logncdf",
"logninv", "lognpdf", "lognrnd", "logspace",
"lookfor", "ls_command", "lsqnonneg", "magic",
"mahalanobis", "manova", "matlabroot",
"mcnemar_test", "mean", "meansq", "median", "menu",
"mesh", "meshc", "meshgrid", "meshz", "mexext",
"mget", "mkpp", "mode", "moment", "movefile",
"mpoles", "mput", "namelengthmax", "nargchk",
"nargoutchk", "nbincdf", "nbininv", "nbinpdf",
"nbinrnd", "nchoosek", "ndgrid", "newplot", "news",
"nonzeros", "normcdf", "normest", "norminv",
"normpdf", "normrnd", "now", "nthroot", "null",
"ocean", "ols", "onenormest", "optimget",
"optimset", "orderfields", "orient", "orth",
"pack", "pareto", "parseparams", "pascal", "patch",
"pathdef", "pcg", "pchip", "pcolor", "pcr",
"peaks", "periodogram", "perl", "perms", "pie",
"pink", "planerot", "playaudio", "plot",
"plotmatrix", "plotyy", "poisscdf", "poissinv",
"poisspdf", "poissrnd", "polar", "poly",
"polyaffine", "polyarea", "polyderiv", "polyfit",
"polygcd", "polyint", "polyout", "polyreduce",
"polyval", "polyvalm", "postpad", "powerset",
"ppder", "ppint", "ppjumps", "ppplot", "ppval",
"pqpnonneg", "prepad", "primes", "print",
"print_usage", "prism", "probit", "qp", "qqplot",
"quadcc", "quadgk", "quadl", "quadv", "quiver",
"qzhess", "rainbow", "randi", "range", "rank",
"ranks", "rat", "reallog", "realpow", "realsqrt",
"record", "rectangle_lw", "rectangle_sw",
"rectint", "refresh", "refreshdata",
"regexptranslate", "repmat", "residue", "ribbon",
"rindex", "roots", "rose", "rosser", "rotdim",
"rref", "run", "run_count", "rundemos", "run_test",
"runtests", "saveas", "saveaudio", "saveobj",
"savepath", "scatter", "secd", "semilogx",
"semilogxerr", "semilogy", "semilogyerr",
"setaudio", "setdiff", "setfield", "setxor",
"shading", "shift", "shiftdim", "sign_test",
"sinc", "sind", "sinetone", "sinewave", "skewness",
"slice", "sombrero", "sortrows", "spaugment",
"spconvert", "spdiags", "spearman", "spectral_adf",
"spectral_xdf", "specular", "speed", "spencer",
"speye", "spfun", "sphere", "spinmap", "spline",
"spones", "sprand", "sprandn", "sprandsym",
"spring", "spstats", "spy", "sqp", "stairs",
"statistics", "std", "stdnormal_cdf",
"stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd",
"stem", "stft", "strcat", "strchr", "strjust",
"strmatch", "strread", "strsplit", "strtok",
"strtrim", "strtrunc", "structfun", "studentize",
"subplot", "subsindex", "subspace", "substr",
"substruct", "summer", "surf", "surface", "surfc",
"surfl", "surfnorm", "svds", "swapbytes",
"sylvester_matrix", "symvar", "synthesis", "table",
"tand", "tar", "tcdf", "tempdir", "tempname",
"test", "text", "textread", "textscan", "tinv",
"title", "toeplitz", "tpdf", "trace", "trapz",
"treelayout", "treeplot", "triangle_lw",
"triangle_sw", "tril", "trimesh", "triplequad",
"triplot", "trisurf", "triu", "trnd", "tsearchn",
"t_test", "t_test_regression", "type", "unidcdf",
"unidinv", "unidpdf", "unidrnd", "unifcdf",
"unifinv", "unifpdf", "unifrnd", "union", "unique",
"unix", "unmkpp", "unpack", "untabify", "untar",
"unwrap", "unzip", "u_test", "validatestring",
"vander", "var", "var_test", "vech", "ver",
"version", "view", "voronoi", "voronoin",
"waitforbuttonpress", "wavread", "wavwrite",
"wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday",
"welch_test", "what", "white", "whitebg",
"wienrnd", "wilcoxon_test", "wilkinson", "winter",
"xlabel", "xlim", "ylabel", "yulewalker", "zip",
"zlabel", "z_test", ]
loadable_kw = [ "airy", "amd", "balance", "besselh", "besseli",
"besselj", "besselk", "bessely", "bitpack",
"bsxfun", "builtin", "ccolamd", "cellfun",
"cellslices", "chol", "choldelete", "cholinsert",
"cholinv", "cholshift", "cholupdate", "colamd",
"colloc", "convhulln", "convn", "csymamd",
"cummax", "cummin", "daspk", "daspk_options",
"dasrt", "dasrt_options", "dassl", "dassl_options",
"dbclear", "dbdown", "dbstack", "dbstatus",
"dbstop", "dbtype", "dbup", "dbwhere", "det",
"dlmread", "dmperm", "dot", "eig", "eigs",
"endgrent", "endpwent", "etree", "fft", "fftn",
"fftw", "filter", "find", "full", "gcd",
"getgrent", "getgrgid", "getgrnam", "getpwent",
"getpwnam", "getpwuid", "getrusage", "givens",
"gmtime", "gnuplot_binary", "hess", "ifft",
"ifftn", "inv", "isdebugmode", "issparse", "kron",
"localtime", "lookup", "lsode", "lsode_options",
"lu", "luinc", "luupdate", "matrix_type", "max",
"min", "mktime", "pinv", "qr", "qrdelete",
"qrinsert", "qrshift", "qrupdate", "quad",
"quad_options", "qz", "rand", "rande", "randg",
"randn", "randp", "randperm", "rcond", "regexp",
"regexpi", "regexprep", "schur", "setgrent",
"setpwent", "sort", "spalloc", "sparse", "spparms",
"sprank", "sqrtm", "strfind", "strftime",
"strptime", "strrep", "svd", "svd_driver", "syl",
"symamd", "symbfact", "symrcm", "time", "tsearch",
"typecast", "urlread", "urlwrite", ]
mapping_kw = [ "abs", "acos", "acosh", "acot", "acoth", "acsc",
"acsch", "angle", "arg", "asec", "asech", "asin",
"asinh", "atan", "atanh", "beta", "betainc",
"betaln", "bincoeff", "cbrt", "ceil", "conj", "cos",
"cosh", "cot", "coth", "csc", "csch", "erf", "erfc",
"erfcx", "erfinv", "exp", "finite", "fix", "floor",
"fmod", "gamma", "gammainc", "gammaln", "imag",
"isalnum", "isalpha", "isascii", "iscntrl",
"isdigit", "isfinite", "isgraph", "isinf",
"islower", "isna", "isnan", "isprint", "ispunct",
"isspace", "isupper", "isxdigit", "lcm", "lgamma",
"log", "lower", "mod", "real", "rem", "round",
"roundb", "sec", "sech", "sign", "sin", "sinh",
"sqrt", "tan", "tanh", "toascii", "tolower", "xor",
]
builtin_consts = [ "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA",
"OCTAVE_HOME", "OCTAVE_VERSION", "PAGER",
"PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET",
"SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO",
"S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE",
"WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED",
"WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG",
"WSTOPSIG", "WTERMSIG", "WUNTRACED", ]
tokens = {
'root': [
            # TODO: support Octave block comments (%{ ... %} and #{ ... #})
(r'[%#].*$', Comment),
(r'^\s*function', Keyword, 'deffunc'),
# from 'iskeyword' on hg changeset 8cc154f45e37
(r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|'
r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|'
r'endevents|endfor|endfunction|endif|endmethods|endproperties|'
r'endswitch|endwhile|events|for|function|get|global|if|methods|'
r'otherwise|persistent|properties|return|set|static|switch|try|'
r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword),
("(" + "|".join( builtin_kw + command_kw
+ function_kw + loadable_kw
+ mapping_kw) + r')\b', Name.Builtin),
("(" + "|".join(builtin_consts) + r')\b', Name.Constant),
# operators in Octave but not Matlab:
(r'-=|!=|!|/=|--', Operator),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators in Octave but not Matlab requiring escape for re:
            (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
(r'"[^"]*"', String),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w\)\]])\'', Operator),
(r'(?<![\w\)\]])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r"[^']*'", String, '#pop'),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
def analyse_text(text):
        if re.match('^\s*[%#]', text, re.M):  # comment
return 0.1
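# Illustrative only: OctaveLexer and MatlabLexer both claim '*.m', so
# filename lookup is ambiguous and pygments.lexers.guess_lexer() falls back
# to the analyse_text() scores; a hedged sketch:
#
#     from pygments.lexers import guess_lexer
#     lx = guess_lexer(u'% comment\nx = ones(3);')
#     print(lx.name)  # expected to favour 'Matlab' (0.9) over 'Octave' (0.1)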
class ScilabLexer(RegexLexer):
"""
For Scilab source code.
*New in Pygments 1.5.*
"""
name = 'Scilab'
aliases = ['scilab']
filenames = ['*.sci', '*.sce', '*.tst']
mimetypes = ['text/scilab']
tokens = {
'root': [
(r'//.*?$', Comment.Single),
(r'^\s*function', Keyword, 'deffunc'),
(r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|'
r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|'
r'endevents|endfor|endfunction|endif|endmethods|endproperties|'
r'endswitch|endwhile|events|for|function|get|global|if|methods|'
r'otherwise|persistent|properties|return|set|static|switch|try|'
r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword),
("(" + "|".join(_scilab_builtins.functions_kw +
_scilab_builtins.commands_kw +
_scilab_builtins.macros_kw
) + r')\b', Name.Builtin),
(r'(%s)\b' % "|".join(map(re.escape, _scilab_builtins.builtin_consts)),
Name.Constant),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'[\[\](){}@.,=:;]', Punctuation),
(r'"[^"]*"', String),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w\)\]])\'', Operator),
(r'(?<![\w\)\]])\'', String, 'string'),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r"[^']*'", String, '#pop'),
(r'.', String, '#pop'),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
class NumPyLexer(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
*New in Pygments 0.10.*
"""
name = 'NumPy'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = set([
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
])
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
def analyse_text(text):
return (shebang_matches(text, r'pythonw?(2(\.\d)?)?') or
'import ' in text[:1000]) \
and ('import numpy' in text or 'from numpy import' in text)
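# Illustrative only: NumPyLexer re-tags names from EXTRA_KEYWORDS that the
# plain PythonLexer would emit as Name tokens; a hedged sketch:
#
#     from pygments.token import Keyword
#     toks = list(NumPyLexer().get_tokens(u'import numpy\nnumpy.zeros(3)\n'))
#     assert (Keyword.Pseudo, u'zeros') in toks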
class RConsoleLexer(Lexer):
"""
For R console transcripts or R CMD BATCH output files.
"""
name = 'RConsole'
aliases = ['rconsole', 'rout']
filenames = ['*.Rout']
def get_tokens_unprocessed(self, text):
slexer = SLexer(**self.options)
current_code_block = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>') or line.startswith('+'):
# Colorize the prompt as such,
# then put rest of line into current_code_block
insertions.append((len(current_code_block),
[(0, Generic.Prompt, line[:2])]))
current_code_block += line[2:]
else:
# We have reached a non-prompt line!
# If we have stored prompt lines, need to process them first.
if current_code_block:
# Weave together the prompts and highlight code.
for item in do_insertions(insertions,
slexer.get_tokens_unprocessed(current_code_block)):
yield item
# Reset vars for next code block.
current_code_block = ''
insertions = []
# Now process the actual line itself, this is output from R.
yield match.start(), Generic.Output, line
# If we happen to end on a code block with nothing after it, need to
# process the last code block. This is neither elegant nor DRY so
# should be changed.
if current_code_block:
for item in do_insertions(insertions,
slexer.get_tokens_unprocessed(current_code_block)):
yield item
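# Illustrative only (assumed transcript, not from the original file): lines
# starting with '>' or '+' are prompts whose remainder is woven into SLexer
# output; everything else is Generic.Output, e.g.:
#
#     > x <- c(1, 2,
#     + 3)
#     > mean(x)
#     [1] 2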
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
*New in Pygments 0.10.*
"""
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile']
mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
'text/x-R', 'text/x-r-history', 'text/x-r-profile']
tokens = {
'comments': [
(r'#.*$', Comment.Single),
],
'valid_name': [
(r'[a-zA-Z][0-9a-zA-Z\._]*', Text),
# can begin with ., but not if that is followed by a digit
(r'\.[a-zA-Z_][0-9a-zA-Z\._]*', Text),
],
'punctuation': [
(r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
],
'keywords': [
(r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
r'(?![0-9a-zA-Z\._])',
Keyword.Reserved)
],
'operators': [
(r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
(r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator)
],
'builtin_symbols': [
(r'(NULL|NA(_(integer|real|complex|character)_)?|'
r'Inf|TRUE|FALSE|NaN|\.\.(\.|[0-9]+))'
r'(?![0-9a-zA-Z\._])',
Keyword.Constant),
(r'(T|F)\b', Keyword.Variable),
],
'numbers': [
# hex number
(r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
# decimal number
(r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+)([eE][+-]?[0-9]+)?[Li]?',
Number),
],
'statements': [
include('comments'),
# whitespaces
(r'\s+', Text),
(r'`.*?`', String.Backtick),
(r'\'', String, 'string_squote'),
(r'\"', String, 'string_dquote'),
include('builtin_symbols'),
include('numbers'),
include('keywords'),
include('punctuation'),
include('operators'),
include('valid_name'),
],
'root': [
include('statements'),
# blocks:
(r'\{|\}', Punctuation),
#(r'\{', Punctuation, 'block'),
(r'.', Text),
],
#'block': [
# include('statements'),
# ('\{', Punctuation, '#push'),
# ('\}', Punctuation, '#pop')
#],
'string_squote': [
(r'([^\'\\]|\\.)*\'', String, '#pop'),
],
'string_dquote': [
(r'([^"\\]|\\.)*"', String, '#pop'),
],
}
def analyse_text(text):
if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
return 0.11
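# Illustrative only: the '<-' heuristic in analyse_text() above fires on
# ordinary R assignments such as:
#
#     x <- c(1, 2, 3)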
class BugsLexer(RegexLexer):
"""
Pygments Lexer for `OpenBugs <http://www.openbugs.info/w/>`_ and WinBugs
models.
*New in Pygments 1.6.*
"""
name = 'BUGS'
aliases = ['bugs', 'winbugs', 'openbugs']
filenames = ['*.bug']
_FUNCTIONS = [
# Scalar functions
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
'trunc',
# Vector functions
'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
'sd', 'sort', 'sum',
## Special
'D', 'I', 'F', 'T', 'C']
""" OpenBUGS built-in functions
From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII
This also includes
- T, C, I : Truncation and censoring.
``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS.
- D : ODE
- F : Functional http://www.openbugs.info/Examples/Functionals.html
"""
_DISTRIBUTIONS = ['dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
'dmt', 'dwish']
""" OpenBUGS built-in distributions
Functions from
http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
"""
tokens = {
'whitespace' : [
(r"\s+", Text),
],
'comments' : [
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model)(\s+)({)',
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved),
# Built-in Functions
(r'(%s)(?=\s*\()'
% r'|'.join(_FUNCTIONS + _DISTRIBUTIONS),
Name.Builtin),
# Regular variable names
(r'[A-Za-z][A-Za-z0-9_.]*', Name),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
# Punctuation
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix and prefix operators
(r'\+|-|\*|/', Operator),
# Block
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r"^\s*model\s*{", text, re.M):
return 0.7
else:
return 0.0
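# Illustrative only (assumed model, not from the original file): the kind of
# fragment BugsLexer targets; analyse_text() above keys on a 'model {' line:
#
#     model {
#         for (i in 1:N) { y[i] ~ dnorm(mu, tau) }
#         mu ~ dnorm(0.0, 1.0E-6)
#     }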
class JagsLexer(RegexLexer):
"""
Pygments Lexer for JAGS.
*New in Pygments 1.6.*
"""
name = 'JAGS'
aliases = ['jags']
filenames = ['*.jag', '*.bug']
## JAGS
_FUNCTIONS = [
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cos', 'cosh', 'cloglog',
'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
        # Truncation/censoring helpers (T and I), kept for completeness
'T', 'I']
    # Distributions with density, probability and quantile functions;
    # the '[dpq]' prefix forms e.g. dnorm, pnorm, qnorm
    _DISTRIBUTIONS = ['[dpq]%s' % x for x in
                      ['bern', 'beta', 'chisqr', 'dexp', 'exp',
                       'f', 'gamma', 'gen.gamma', 'logis', 'lnorm',
                       'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib']]
# Other distributions without density and probability
_OTHER_DISTRIBUTIONS = [
'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
'dnbinom', 'dweibull', 'ddirich']
tokens = {
'whitespace' : [
(r"\s+", Text),
],
'names' : [
# Regular variable names
(r'[a-zA-Z][a-zA-Z0-9_.]*\b', Name),
],
'comments' : [
# do not use stateful comments
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model|data)(\s+)({)',
bygroups(Keyword.Namespace, Text, Punctuation)),
(r'var(?![0-9a-zA-Z\._])', Keyword.Declaration),
# Reserved Words
(r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved),
# Builtins
# Need to use lookahead because . is a valid char
(r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
+ _DISTRIBUTIONS
+ _OTHER_DISTRIBUTIONS),
Name.Builtin),
# Names
include('names'),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
(r'<-|~', Operator),
            # JAGS includes many more operators than OpenBUGS
            (r'\+|-|\*|\/|\|\||&&|[<>=]=?|\^|%.*?%', Operator),
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*model\s*\{', text, re.M):
if re.search(r'^\s*data\s*\{', text, re.M):
return 0.9
elif re.search(r'^\s*var', text, re.M):
return 0.9
else:
return 0.3
else:
return 0
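# Illustrative only (assumed model, not from the original file): a 'var'
# declaration plus a model block scores 0.9 in analyse_text() above:
#
#     var mu, tau;
#     model {
#         for (i in 1:N) { y[i] ~ dnorm(mu, tau) }
#     }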
class StanLexer(RegexLexer):
"""Pygments Lexer for Stan models.
The Stan modeling language is specified in the *Stan 1.3.0
Modeling Language Manual* `pdf
<http://code.google.com/p/stan/downloads/detail?name=stan-reference-1.3.0.pdf>`_.
*New in Pygments 1.6.*
"""
name = 'Stan'
aliases = ['stan']
filenames = ['*.stan']
tokens = {
'whitespace' : [
(r"\s+", Text),
],
'comments' : [
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'(//|#).*$', Comment.Single),
],
'root': [
# Stan is more restrictive on strings than this regex
(r'"[^"]*"', String),
# Comments
include('comments'),
            include('whitespace'),
            # Block start
(r'(%s)(\s*)({)' %
                r'|'.join(('data', r'transformed\s+data',
'parameters', r'transformed\s+parameters',
'model', r'generated\s+quantities')),
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword),
# Truncation
(r'T(?=\s*\[)', Keyword),
# Data types
(r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type),
# Punctuation
(r"[;:,\[\]()]", Punctuation),
# Builtin
(r'(%s)(?=\s*\()'
% r'|'.join(_stan_builtins.FUNCTIONS
+ _stan_builtins.DISTRIBUTIONS),
Name.Builtin),
# Special names ending in __, like lp__
(r'[A-Za-z][A-Za-z0-9_]*__\b', Name.Builtin.Pseudo),
(r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved),
# Regular variable names
(r'[A-Za-z][A-Za-z0-9_]*\b', Name),
# Real Literals
(r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float),
(r'-?[0-9]*\.[0-9]*', Number.Float),
# Integer Literals
(r'-?[0-9]+', Number.Integer),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix and prefix operators (and = )
(r"\+|-|\.?\*|\.?/|\\|'|==?|!=?|<=?|>=?|\|\||&&", Operator),
# Block delimiters
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*parameters\s*\{', text, re.M):
return 1.0
else:
return 0.0
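# Illustrative only (assumed model, not from the original file): a minimal
# Stan program; the 'parameters {' block gives analyse_text() its 1.0 score:
#
#     parameters { real mu; }
#     model { mu ~ normal(0, 1); }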
class IDLLexer(RegexLexer):
"""
Pygments Lexer for IDL (Interactive Data Language).
*New in Pygments 1.6.*
"""
name = 'IDL'
aliases = ['idl']
filenames = ['*.pro']
mimetypes = ['text/idl']
_RESERVED = ['and', 'begin', 'break', 'case', 'common', 'compile_opt',
                 'continue', 'do', 'else', 'end', 'endcase', 'endelse',
'endfor', 'endforeach', 'endif', 'endrep', 'endswitch',
'endwhile', 'eq', 'for', 'foreach', 'forward_function',
'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le',
'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro',
'repeat', 'switch', 'then', 'until', 'while', 'xor']
"""Reserved words from: http://www.exelisvis.com/docs/reswords.html"""
_BUILTIN_LIB = ['abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10',
'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query',
'arg_present', 'array_equal', 'array_indices', 'arrow',
'ascii_template', 'asin', 'assoc', 'atan', 'axis',
'a_correlate', 'bandpass_filter', 'bandreject_filter',
'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk',
'besely', 'beta', 'bilinear', 'binary_template', 'bindgen',
'binomial', 'bin_date', 'bit_ffs', 'bit_population',
'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint',
'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder',
'bytscl', 'caldat', 'calendar', 'call_external',
'call_function', 'call_method', 'call_procedure', 'canny',
'catch', 'cd', 'cdf_[0-9a-za-z_]*', 'ceil', 'chebyshev',
'check_math',
'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen',
'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts',
'cmyk_convert', 'colorbar', 'colorize_sample',
'colormap_applicable', 'colormap_gradient',
'colormap_rotation', 'colortable', 'color_convert',
'color_exchange', 'color_quan', 'color_range_map', 'comfit',
'command_line_args', 'complex', 'complexarr', 'complexround',
'compute_mesh_normals', 'cond', 'congrid', 'conj',
'constrained_min', 'contour', 'convert_coord', 'convol',
'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos',
'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct',
'create_view', 'crossp', 'crvlength', 'cti_test',
'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord',
'cw_animate', 'cw_animate_getp', 'cw_animate_load',
'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index',
'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel',
'cw_form', 'cw_fslider', 'cw_light_editor',
'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient',
'cw_palette_editor', 'cw_palette_editor_get',
'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider',
'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists',
'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key',
'define_msgblk', 'define_msgblk_from_file', 'defroi',
'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv',
'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix',
'dialog_dbconnect', 'dialog_message', 'dialog_pickfile',
'dialog_printersetup', 'dialog_printjob',
'dialog_read_image', 'dialog_write_image', 'digital_filter',
'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure',
'dlm_load', 'dlm_register', 'doc_library', 'double',
'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec',
'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn',
'eof', 'eos_[0-9a-za-z_]*', 'erase', 'erf', 'erfc', 'erfcx',
'erode', 'errorplot', 'errplot', 'estimator_filter',
'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint',
'extrac', 'extract_slice', 'factorial', 'fft', 'filepath',
'file_basename', 'file_chmod', 'file_copy', 'file_delete',
'file_dirname', 'file_expand_path', 'file_info',
'file_lines', 'file_link', 'file_mkdir', 'file_move',
'file_poll_input', 'file_readlink', 'file_same',
'file_search', 'file_test', 'file_which', 'findgen',
'finite', 'fix', 'flick', 'float', 'floor', 'flow3',
'fltarr', 'flush', 'format_axis_values', 'free_lun',
'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root',
'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct',
'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint',
'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv',
'getwindows', 'get_drive_list', 'get_dxf_objects',
'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size',
'greg2jul', 'grib_[0-9a-za-z_]*', 'grid3', 'griddata',
'grid_input', 'grid_tps', 'gs_iter',
'h5[adfgirst]_[0-9a-za-z_]*', 'h5_browser', 'h5_close',
'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse',
'hanning', 'hash', 'hdf_[0-9a-za-z_]*', 'heap_free',
'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save',
'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal',
'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int',
'i18n_multibytetoutf8', 'i18n_multibytetowidechar',
'i18n_utf8tomultibyte', 'i18n_widechartomultibyte',
'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity',
'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64',
'idl_validname', 'iellipse', 'igamma', 'igetcurrent',
'igetdata', 'igetid', 'igetproperty', 'iimage', 'image',
'image_cont', 'image_statistics', 'imaginary', 'imap',
'indgen', 'intarr', 'interpol', 'interpolate',
'interval_volume', 'int_2d', 'int_3d', 'int_tabulated',
'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon',
'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve',
'irotate', 'ir_filter', 'isa', 'isave', 'iscale',
'isetcurrent', 'isetproperty', 'ishft', 'isocontour',
'isosurface', 'isurface', 'itext', 'itranslate', 'ivector',
'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse',
'json_serialize', 'jul2greg', 'julday', 'keyword_set',
'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date',
'label_region', 'ladfit', 'laguerre', 'laplacian',
'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ',
'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes',
'la_gm_linear_model', 'la_hqr', 'la_invert',
'la_least_squares', 'la_least_square_equality',
'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol',
'la_svd', 'la_tridc', 'la_trimprove', 'la_triql',
'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt',
'legend', 'legendre', 'linbcg', 'lindgen', 'linfit',
'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr',
'lngamma', 'lnp_test', 'loadct', 'locale_get',
'logical_and', 'logical_or', 'logical_true', 'lon64arr',
'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove',
'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll',
'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points',
'map_continents', 'map_grid', 'map_image', 'map_patch',
'map_proj_forward', 'map_proj_image', 'map_proj_info',
'map_proj_init', 'map_proj_inverse', 'map_set',
'matrix_multiply', 'matrix_power', 'max', 'md_test',
'mean', 'meanabsdev', 'mean_filter', 'median', 'memory',
'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge',
'mesh_numtriangles', 'mesh_obj', 'mesh_smooth',
'mesh_surfacearea', 'mesh_validate', 'mesh_volume',
'message', 'min', 'min_curve_surf', 'mk_html_help',
'modifyct', 'moment', 'morph_close', 'morph_distance',
'morph_gradient', 'morph_hitormiss', 'morph_open',
'morph_thin', 'morph_tophat', 'multi', 'm_correlate',
'ncdf_[0-9a-za-z_]*', 'newton', 'noise_hurl', 'noise_pick',
'noise_scatter', 'noise_slur', 'norm', 'n_elements',
'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy',
'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid',
'online_help', 'on_error', 'open', 'oplot', 'oploterr',
'parse_url', 'particle_trace', 'path_cache', 'path_sep',
'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox',
'plot_field', 'pnt_line', 'point_lun', 'polarplot',
'polar_contour', 'polar_surface', 'poly', 'polyfill',
'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp',
'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell',
'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes',
'print', 'printd', 'product', 'profile', 'profiler',
'profiles', 'project_vol', 'psafm', 'pseudo',
'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new',
'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull',
'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp',
'query_csv', 'query_dicom', 'query_gif', 'query_image',
'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict',
'query_png', 'query_ppm', 'query_srf', 'query_tiff',
'query_wav', 'radon', 'randomn', 'randomu', 'ranks',
'rdpix', 'read', 'reads', 'readu', 'read_ascii',
'read_binary', 'read_bmp', 'read_csv', 'read_dicom',
'read_gif', 'read_image', 'read_interfile', 'read_jpeg',
'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png',
'read_ppm', 'read_spr', 'read_srf', 'read_sylk',
'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap',
'read_xwd', 'real_part', 'rebin', 'recall_commands',
'recon3', 'reduce_colors', 'reform', 'region_grow',
'register_cursor', 'regress', 'replicate',
'replicate_inplace', 'resolve_all', 'resolve_routine',
'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts',
'rot', 'rotate', 'round', 'routine_filepath',
'routine_info', 'rs_test', 'r_correlate', 'r_test',
'save', 'savgol', 'scale3', 'scale3d', 'scope_level',
'scope_traceback', 'scope_varfetch', 'scope_varname',
'search2d', 'search3d', 'sem_create', 'sem_delete',
'sem_lock', 'sem_release', 'setenv', 'set_plot',
'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr',
'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap',
'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin',
'sindgen', 'sinh', 'size', 'skewness', 'skip_lun',
'slicer3', 'slide_image', 'smooth', 'sobel', 'socket',
'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat',
'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab',
'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize',
'stddev', 'stop', 'strarr', 'strcmp', 'strcompress',
'streamline', 'stregex', 'stretch', 'string', 'strjoin',
'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid',
'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign',
'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc',
'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace',
'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan',
'tanh', 'tek_color', 'temporary', 'tetra_clip',
'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed',
'timegen', 'time_test2', 'tm_test', 'total', 'trace',
'transpose', 'triangulate', 'trigrid', 'triql', 'trired',
'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff',
'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd',
'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint',
'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr',
'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym',
'value_locate', 'variance', 'vector', 'vector_field', 'vel',
'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj',
'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw',
'where', 'widget_base', 'widget_button', 'widget_combobox',
'widget_control', 'widget_displaycontextmen', 'widget_draw',
'widget_droplist', 'widget_event', 'widget_info',
'widget_label', 'widget_list', 'widget_propertysheet',
'widget_slider', 'widget_tab', 'widget_table',
'widget_text', 'widget_tree', 'widget_tree_move',
'widget_window', 'wiener_filter', 'window', 'writeu',
'write_bmp', 'write_csv', 'write_gif', 'write_image',
'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict',
'write_png', 'write_ppm', 'write_spr', 'write_srf',
'write_sylk', 'write_tiff', 'write_wav', 'write_wave',
'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt',
'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet',
'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar',
'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet',
'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps',
'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise',
'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont',
'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl',
'xmtool', 'xobjview', 'xobjview_rotate',
'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d',
'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit',
'xvolume', 'xvolume_rotate', 'xvolume_write_image',
'xyouts', 'zoom', 'zoom_24']
"""Functions from: http://www.exelisvis.com/docs/routines-1.html"""
tokens = {
'root': [
            (r'^\s*;.*?\n', Comment.Single),
(r'\b(' + '|'.join(_RESERVED) + r')\b', Keyword),
(r'\b(' + '|'.join(_BUILTIN_LIB) + r')\b', Name.Builtin),
(r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator),
            (r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\||\?|:', Operator),
(r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator),
(r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator),
            (r'\b[0-9]+(L|B|S|UL|ULL|LL)?\b', Number),
(r'.', Text),
]
}
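# Illustrative only (assumed IDL fragment, not from the original file); note
# that the comment rule above only matches comments starting a line:
#
#     pro demo
#       ; a full-line comment
#       x = findgen(10)
#       print, total(x)
#     end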
class RdLexer(RegexLexer):
"""
Pygments Lexer for R documentation (Rd) files
This is a very minimal implementation, highlighting little more
than the macros. A description of Rd syntax is found in `Writing R
Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
    and `Parsing Rd files <http://developer.r-project.org/parseRd.pdf>`_.
*New in Pygments 1.6.*
"""
name = 'Rd'
aliases = ['rd']
filenames = ['*.Rd']
mimetypes = ['text/x-r-doc']
# To account for verbatim / LaTeX-like / and R-like areas
# would require parsing.
tokens = {
'root' : [
# catch escaped brackets and percent sign
(r'\\[\\{}%]', String.Escape),
# comments
(r'%.*$', Comment),
# special macros with no arguments
(r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
# macros
(r'\\[a-zA-Z]+\b', Keyword),
# special preprocessor macros
(r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
# non-escaped brackets
(r'[{}]', Name.Builtin),
# everything else
(r'[^\\%\n{}]+', Text),
(r'.', Text),
]
}
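# Illustrative only (assumed Rd fragment, not from the original file):
#
#     % a comment
#     \name{foo}
#     \title{An Example}
#     \description{Non-escaped braces are tagged Name.Builtin.}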
class IgorLexer(RegexLexer):
"""
Pygments Lexer for Igor Pro procedure files (.ipf).
See http://www.wavemetrics.com/ and http://www.igorexchange.com/.
*New in Pygments 1.7.*
"""
name = 'Igor'
aliases = ['igor', 'igorpro']
filenames = ['*.ipf']
mimetypes = ['text/ipf']
flags = re.IGNORECASE
flowControl = [
'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch',
'case', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry', 'break',
'continue', 'return',
]
types = [
'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE',
'STRUCT', 'ThreadSafe', 'function', 'end', 'static', 'macro', 'window',
'graph', 'Structure', 'EndStructure', 'EndMacro', 'FuncFit', 'Proc',
'Picture', 'Menu', 'SubMenu', 'Prompt', 'DoPrompt',
]
operations = [
'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio',
'AddMovieFrame', 'APMath', 'Append', 'AppendImage',
'AppendLayoutObject', 'AppendMatrixContour', 'AppendText',
'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour',
'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall',
'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox',
'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc',
'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar',
'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile',
'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross',
'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor',
'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions',
'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefineGuide',
'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints',
'Differentiate', 'dir', 'Display', 'DisplayHelpTopic',
'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow',
'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine',
'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText',
'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder',
'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText',
'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp',
'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR',
'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly',
'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf',
'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload',
'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo',
'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow',
'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox',
'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools',
'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles',
'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection',
'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask',
'ImageHistModification', 'ImageHistogram', 'ImageInterpolate',
'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration',
'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave',
'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold',
'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort',
'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath',
'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder',
'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings',
'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout',
'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData',
'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess',
'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime',
'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter',
'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve',
'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD',
'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve',
'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText',
'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList',
'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout',
'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder',
'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable',
'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain',
'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage',
'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath',
'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open',
'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo',
'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction',
'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu',
'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs',
'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable',
'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit',
'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour',
'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage',
'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder',
'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages',
'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample',
'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData',
'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook',
'SavePackagePreferences', 'SavePICT', 'SaveTableCopy',
'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern',
'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer',
'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode',
'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed',
'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus',
'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth',
'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet',
'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart',
'SphericalInterpolate', 'SphericalTriangulate', 'SplitString',
'sprintf', 'sscanf', 'Stack', 'StackWindows',
'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest',
'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest',
'StatsCircularCorrelationTest', 'StatsCircularMeans',
'StatsCircularMoments', 'StatsCircularTwoSampleTest',
'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest',
'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest',
'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest',
'StatsLinearRegression', 'StatsMultiCorrelationTest',
'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles',
'StatsRankCorrelationTest', 'StatsResample', 'StatsSample',
'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest',
'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest',
'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest',
'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String',
'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile',
'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid',
'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv',
'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform',
'WindowFunction',
]
functions = [
'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog',
'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'bessi',
'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch',
'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs',
'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev',
'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos',
'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi',
'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual',
'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian',
'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave',
'DDERequestWave', 'DDEStatus', 'DDETerminate', 'deltax', 'digamma',
'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf',
'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata',
'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor',
'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin',
'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize',
'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise',
'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1',
'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion',
'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D',
'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre',
'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln',
'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint',
'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max',
'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey',
'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect',
'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x',
'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 'qcsr', 'r2polar',
'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec',
'SelectNumber', 'sign', 'sin', 'sinc', 'sinh', 'SphericalBessJ',
'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD',
'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF',
'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF',
'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF',
'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF',
'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF',
'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF',
'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF',
'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF',
'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF',
'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF',
'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF',
'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF',
'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF',
'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF',
'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF',
'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF',
'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF',
'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF',
'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF',
'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF',
'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF',
'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF',
'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF',
'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute',
'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF',
'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF',
'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF',
'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean',
'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise',
'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF',
'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch',
'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists',
'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease',
'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks',
'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists',
'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem',
'WinType', 'WNoise', 'x', 'x2pnt', 'xcsr', 'y', 'z', 'zcsr', 'ZernikeR',
]
functions += [
'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo',
'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName',
'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo',
'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date',
'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo',
'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont',
'GetDimLabel', 'GetErrMessage', 'GetFormula',
'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR',
'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData',
'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash',
'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile',
'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList',
'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str',
'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo',
'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey',
'RemoveEnding', 'RemoveFromList', 'RemoveListItem',
'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey',
'Secs2Date', 'Secs2Time', 'SelectString', 'SortList',
'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath',
'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault',
'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel',
'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr',
'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits',
'WinList', 'WinName', 'WinRecreation', 'XWaveName',
'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef',
'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef',
'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR',
'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR',
]
tokens = {
'root': [
(r'//.*$', Comment.Single),
(r'"([^"\\]|\\.)*"', String),
# Flow Control.
(r'\b(%s)\b' % '|'.join(flowControl), Keyword),
# Types.
(r'\b(%s)\b' % '|'.join(types), Keyword.Type),
# Built-in operations.
(r'\b(%s)\b' % '|'.join(operations), Name.Class),
# Built-in functions.
(r'\b(%s)\b' % '|'.join(functions), Name.Function),
# Compiler directives.
(r'^#(include|pragma|define|ifdef|ifndef|endif)',
Name.Decorator),
(r'[^a-zA-Z"/]+', Text),
(r'.', Text),
],
}
| mit |
xbed/Mixly_Arduino | mixly_arduino/arduino/portable/sketchbook/libraries/Blynk/extras/gen-qr.py | 4 | 1516 | # coding: utf-8
import sys
import pyqrcode
import gzip, zlib
import base64
import json
import re
#data = sys.argv[1]
data = '''{"boardType":"Generic Board","createdAt":1453325389643,"id":17,"isActive":false,"isShared":false,"keepScreenOn":false,"name":"webpage","updatedAt":1453326512172,"widgets":[{"color":616861439,"pushMode":false,"pin":1,"pinType":"VIRTUAL","height":2,"id":1,"label":"","type":"BUTTON","value":"0","width":2,"x":0,"y":0},{"color":-26804225,"splitMode":false,"pins":[{"max":255,"min":0,"pin":0,"pinType":"VIRTUAL","pwmMode":false,"rangeMappingOn":false},{"max":255,"min":0,"pin":0,"pinType":"VIRTUAL","pwmMode":false,"rangeMappingOn":false},{"max":255,"min":0,"pin":0,"pinType":"VIRTUAL","pwmMode":false,"rangeMappingOn":false}],"height":3,"id":2,"type":"RGB","value":"-104705","width":4,"x":2,"y":0},{"notifyWhenOffline":false,"priority":"high","height":1,"id":3,"type":"NOTIFICATION","width":2,"x":0,"y":2}]}'''
# Compact JSON
#data = json.loads(data)
#data = json.dumps(data, separators=(',',':'))
# Compress
data = str.encode(data, 'utf-8')
data = zlib.compress(data)
#data = gzip.compress(data)
data = base64.b64encode(data)
data = bytes.decode(data)
# Add header
data = 'bp1' + data
# Generate QR
url = pyqrcode.create(data, error='M', version=None, mode='binary')
data = url.text(quiet_zone=4)
data = data.replace("0"," ")
data = data.replace("1","██")
# Clean trailing spaces
data = re.sub(r'\s+$', '', data, 0, re.M)
print(data)
#print('\x1b[6;30;47m' + data + '\x1b[0m')
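# Decoding sketch for the receiving side (illustrative, not part of this
# script): given the 'bp1...' payload embedded in the QR code, strip the
# 3-character header, base64-decode, then zlib-decompress to recover the
# JSON profile. Uses the json/zlib/base64 modules already imported above;
# 'scanned_text' is a hypothetical string read back from the QR code.
#
#   payload = scanned_text
#   raw = zlib.decompress(base64.b64decode(payload[3:]))
#   profile = json.loads(raw.decode('utf-8'))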
| apache-2.0 |
schumi2004/NOT_UPDATED_Sick-Beard-Dutch | lib/bs4/tests/test_builder_registry.py | 485 | 5374 | """Tests of the builder registry."""
import unittest
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry as registry,
HTMLParserTreeBuilder,
TreeBuilderRegistry,
)
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError:
HTML5LIB_PRESENT = False
try:
from bs4.builder import (
LXMLTreeBuilderForXML,
LXMLTreeBuilder,
)
LXML_PRESENT = True
except ImportError:
LXML_PRESENT = False
class BuiltInRegistryTest(unittest.TestCase):
"""Test the built-in registry with the default builders registered."""
def test_combination(self):
if LXML_PRESENT:
self.assertEqual(registry.lookup('fast', 'html'),
LXMLTreeBuilder)
if LXML_PRESENT:
self.assertEqual(registry.lookup('permissive', 'xml'),
LXMLTreeBuilderForXML)
self.assertEqual(registry.lookup('strict', 'html'),
HTMLParserTreeBuilder)
if HTML5LIB_PRESENT:
self.assertEqual(registry.lookup('html5lib', 'html'),
HTML5TreeBuilder)
def test_lookup_by_markup_type(self):
if LXML_PRESENT:
self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
else:
self.assertEqual(registry.lookup('xml'), None)
if HTML5LIB_PRESENT:
self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
else:
self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder)
def test_named_library(self):
if LXML_PRESENT:
self.assertEqual(registry.lookup('lxml', 'xml'),
LXMLTreeBuilderForXML)
self.assertEqual(registry.lookup('lxml', 'html'),
LXMLTreeBuilder)
if HTML5LIB_PRESENT:
self.assertEqual(registry.lookup('html5lib'),
HTML5TreeBuilder)
self.assertEqual(registry.lookup('html.parser'),
HTMLParserTreeBuilder)
def test_beautifulsoup_constructor_does_lookup(self):
# You can pass in a string.
BeautifulSoup("", features="html")
# Or a list of strings.
BeautifulSoup("", features=["html", "fast"])
# You'll get an exception if BS can't find an appropriate
# builder.
self.assertRaises(ValueError, BeautifulSoup,
"", features="no-such-feature")
class RegistryTest(unittest.TestCase):
"""Test the TreeBuilderRegistry class in general."""
def setUp(self):
self.registry = TreeBuilderRegistry()
def builder_for_features(self, *feature_list):
cls = type('Builder_' + '_'.join(feature_list),
(object,), {'features' : feature_list})
self.registry.register(cls)
return cls
def test_register_with_no_features(self):
builder = self.builder_for_features()
# Since the builder advertises no features, you can't find it
# by looking up features.
self.assertEqual(self.registry.lookup('foo'), None)
# But you can find it by doing a lookup with no features, if
# this happens to be the only registered builder.
self.assertEqual(self.registry.lookup(), builder)
def test_register_with_features_makes_lookup_succeed(self):
builder = self.builder_for_features('foo', 'bar')
self.assertEqual(self.registry.lookup('foo'), builder)
self.assertEqual(self.registry.lookup('bar'), builder)
def test_lookup_fails_when_no_builder_implements_feature(self):
builder = self.builder_for_features('foo', 'bar')
self.assertEqual(self.registry.lookup('baz'), None)
def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
builder1 = self.builder_for_features('foo')
builder2 = self.builder_for_features('bar')
self.assertEqual(self.registry.lookup(), builder2)
def test_lookup_fails_when_no_tree_builders_registered(self):
self.assertEqual(self.registry.lookup(), None)
def test_lookup_gets_most_recent_builder_supporting_all_features(self):
has_one = self.builder_for_features('foo')
has_the_other = self.builder_for_features('bar')
has_both_early = self.builder_for_features('foo', 'bar', 'baz')
has_both_late = self.builder_for_features('foo', 'bar', 'quux')
lacks_one = self.builder_for_features('bar')
has_the_other = self.builder_for_features('foo')
# There are two builders featuring 'foo' and 'bar', but
# the one that also features 'quux' was registered later.
self.assertEqual(self.registry.lookup('foo', 'bar'),
has_both_late)
# There is only one builder featuring 'foo', 'bar', and 'baz'.
self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
has_both_early)
def test_lookup_fails_when_cannot_reconcile_requested_features(self):
builder1 = self.builder_for_features('foo', 'bar')
builder2 = self.builder_for_features('foo', 'baz')
self.assertEqual(self.registry.lookup('bar', 'baz'), None)
| gpl-3.0 |
inessadl/kinect-2-libras | Kinect2Libras/KinectFingerTracking/Lib/DocXMLRPCServer.py | 250 | 10500 | """Self documenting XML-RPC Server.
This module can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
This module is built upon the pydoc and SimpleXMLRPCServer
modules.
"""
import pydoc
import inspect
import re
import sys
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler,
CGIXMLRPCRequestHandler,
resolve_dotted_attribute)
class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
# XXX Note that this regular expression does not allow for the
# hyperlinking of arbitrary strings being used as method
# names. Only methods with names consisting of word characters
# and '.'s are hyperlinked.
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?((?:\w|\.)+))\b')
while 1:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
def docroutine(self, object, name, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
title = '<a name="%s"><strong>%s</strong></a>' % (
self.escape(anchor), self.escape(name))
if inspect.ismethod(object):
args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
# exclude the argument bound to the instance, it will be
# confusing to the non-Python user
argspec = inspect.formatargspec (
args[1:],
varargs,
varkw,
defaults,
formatvalue=self.formatvalue
)
elif inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
else:
argspec = '(...)'
if isinstance(object, tuple):
argspec = object[0] or argspec
docstring = object[1] or ""
else:
docstring = pydoc.getdoc(object)
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
doc = self.markup(
docstring, self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def docserver(self, server_name, package_documentation, methods):
"""Produce HTML documentation for an XML-RPC server."""
fdict = {}
for key, value in methods.items():
fdict[key] = '#-' + key
fdict[value] = fdict[key]
server_name = self.escape(server_name)
head = '<big><big><strong>%s</strong></big></big>' % server_name
result = self.heading(head, '#ffffff', '#7799ee')
doc = self.markup(package_documentation, self.preformat, fdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
contents = []
method_items = sorted(methods.items())
for key, value in method_items:
contents.append(self.docroutine(value, key, funcs=fdict))
result = result + self.bigsection(
'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))
return result
class XMLRPCDocGenerator:
"""Generates documentation for an XML-RPC server.
    This class is designed as a mix-in and should not
    be constructed directly.
"""
def __init__(self):
# setup variables used for HTML documentation
self.server_name = 'XML-RPC Server Documentation'
self.server_documentation = \
"This server exports the following methods through the XML-RPC "\
"protocol."
self.server_title = 'XML-RPC Server Documentation'
def set_server_title(self, server_title):
"""Set the HTML title of the generated server documentation"""
self.server_title = server_title
def set_server_name(self, server_name):
"""Set the name of the generated HTML server documentation"""
self.server_name = server_name
def set_server_documentation(self, server_documentation):
"""Set the documentation string for the entire server."""
self.server_documentation = server_documentation
def generate_html_documentation(self):
"""generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide the
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""
methods = {}
for method_name in self.system_listMethods():
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
method_info = [None, None] # argspec, documentation
if hasattr(self.instance, '_get_method_argstring'):
method_info[0] = self.instance._get_method_argstring(method_name)
if hasattr(self.instance, '_methodHelp'):
method_info[1] = self.instance._methodHelp(method_name)
method_info = tuple(method_info)
if method_info != (None, None):
method = method_info
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name
)
except AttributeError:
method = method_info
else:
method = method_info
else:
assert 0, "Could not find method in self.functions and no "\
"instance installed"
methods[method_name] = method
documenter = ServerHTMLDoc()
documentation = documenter.docserver(
self.server_name,
self.server_documentation,
methods
)
return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""XML-RPC and documentation request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
Handles all HTTP GET requests and interprets them as requests
for documentation.
"""
def do_GET(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
response = self.server.generate_html_documentation()
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
class DocXMLRPCServer( SimpleXMLRPCServer,
XMLRPCDocGenerator):
"""XML-RPC and HTML documentation server.
Adds the ability to serve server documentation to the capabilities
of SimpleXMLRPCServer.
"""
def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
logRequests=1, allow_none=False, encoding=None,
bind_and_activate=True):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
allow_none, encoding, bind_and_activate)
XMLRPCDocGenerator.__init__(self)
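# A minimal usage sketch (not part of this module; the host, port, title
# and registered function are illustrative): serve XML-RPC calls on POST
# and the generated HTML documentation on GET.
#
#   server = DocXMLRPCServer(('localhost', 8000))
#   server.set_server_title('Demo server')
#   server.register_function(pow)
#   server.serve_forever()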
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
XMLRPCDocGenerator):
"""Handler for XML-RPC data and documentation requests passed through
CGI"""
def handle_get(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
response = self.generate_html_documentation()
print 'Content-Type: text/html'
print 'Content-Length: %d' % len(response)
print
sys.stdout.write(response)
def __init__(self):
CGIXMLRPCRequestHandler.__init__(self)
XMLRPCDocGenerator.__init__(self)
| apache-2.0 |
LeiZeng/shadowsocks | tests/coverage_server.py | 1072 | 1655 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
if __name__ == '__main__':
import tornado.ioloop
import tornado.web
import urllib
class MainHandler(tornado.web.RequestHandler):
def get(self, project):
try:
with open('/tmp/%s-coverage' % project, 'rb') as f:
coverage = f.read().strip()
n = int(coverage.strip('%'))
if n >= 80:
color = 'brightgreen'
else:
color = 'yellow'
self.redirect(('https://img.shields.io/badge/'
'coverage-%s-%s.svg'
'?style=flat') %
(urllib.quote(coverage), color))
except IOError:
raise tornado.web.HTTPError(404)
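    # Usage sketch (illustrative): the handler reads /tmp/<project>-coverage,
    # a plain-text file expected to hold a percentage such as "93%", e.g.
    #
    #   echo "93%" > /tmp/myproject-coverage
    #   curl http://127.0.0.1:8888/myproject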
application = tornado.web.Application([
(r"/([a-zA-Z0-9\-_]+)", MainHandler),
])
if __name__ == "__main__":
application.listen(8888, address='127.0.0.1')
tornado.ioloop.IOLoop.instance().start()
| apache-2.0 |
marcosmodesto/django-testapp | djangoappengine/management/commands/deploy.py | 53 | 2298 | import logging
import time
import sys
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from ...boot import PROJECT_DIR
from ...utils import appconfig
PRE_DEPLOY_COMMANDS = ()
if 'mediagenerator' in settings.INSTALLED_APPS:
PRE_DEPLOY_COMMANDS += ('generatemedia',)
PRE_DEPLOY_COMMANDS = getattr(settings, 'PRE_DEPLOY_COMMANDS',
PRE_DEPLOY_COMMANDS)
POST_DEPLOY_COMMANDS = getattr(settings, 'POST_DEPLOY_COMMANDS', ())
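# A hypothetical settings.py snippet, assuming custom management commands
# named 'generatemedia' and 'warm_cache' exist in the project; each entry
# is run via call_command() before or after appcfg.py update:
#
#   PRE_DEPLOY_COMMANDS = ('generatemedia',)
#   POST_DEPLOY_COMMANDS = ('warm_cache',)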
def run_appcfg(argv):
# We don't really want to use that one though, it just executes
# this one.
from google.appengine.tools import appcfg
# Reset the logging level to WARN as appcfg will spew tons of logs
# on INFO.
logging.getLogger().setLevel(logging.WARN)
new_args = argv[:]
new_args[1] = 'update'
if appconfig.runtime != 'python':
new_args.insert(1, '-R')
new_args.append(PROJECT_DIR)
syncdb = True
if '--nosyncdb' in new_args:
syncdb = False
new_args.remove('--nosyncdb')
appcfg.main(new_args)
if syncdb:
print "Running syncdb."
# Wait a little bit for deployment to finish.
for countdown in range(9, 0, -1):
sys.stdout.write('%s\r' % countdown)
time.sleep(1)
from django.db import connections
for connection in connections.all():
if hasattr(connection, 'setup_remote'):
connection.setup_remote()
call_command('syncdb', remote=True, interactive=True)
if getattr(settings, 'ENABLE_PROFILER', False):
print "--------------------------\n" \
"WARNING: PROFILER ENABLED!\n" \
"--------------------------"
class Command(BaseCommand):
"""
Deploys the website to the production server.
Any additional arguments are passed directly to appcfg.py update.
"""
help = "Calls appcfg.py update for the current project."
args = "[any appcfg.py options]"
def run_from_argv(self, argv):
for command in PRE_DEPLOY_COMMANDS:
call_command(command)
try:
run_appcfg(argv)
finally:
for command in POST_DEPLOY_COMMANDS:
call_command(command)
| bsd-3-clause |
brandonPurvis/osf.io | api/institutions/serializers.py | 4 | 1145 | from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, RelationshipField, LinksField
class InstitutionSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'id',
'name'
])
name = ser.CharField(read_only=True)
id = ser.CharField(read_only=True, source='_id')
logo_path = ser.CharField(read_only=True)
auth_url = ser.CharField(read_only=True)
links = LinksField({'self': 'get_api_url', })
nodes = RelationshipField(
related_view='institutions:institution-nodes',
related_view_kwargs={'institution_id': '<pk>'},
)
registrations = RelationshipField(
related_view='institutions:institution-registrations',
related_view_kwargs={'institution_id': '<pk>'}
)
users = RelationshipField(
related_view='institutions:institution-users',
related_view_kwargs={'institution_id': '<pk>'}
)
def get_api_url(self, obj):
return obj.absolute_api_v2_url
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
class Meta:
type_ = 'institutions'
| apache-2.0 |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/pubsub/setuparg1.py | 8 | 1584 | '''
Import this file before the first 'from pubsub import pub' statement
to make pubsub use the *arg1* messaging protocol::
from pubsub import setuparg1
from pubsub import pub
This could be necessary in at least two situations:
1. with a default pubsub installation, where *kwargs* messaging protocol
is the default, but an application developer requires the less
stringent *arg1* messaging protocol.
2. with a pubsub installation that has been configured to provide the
legacy v1 API as the default (such as in some versions of wxPython),
   but an application developer wants to use the latest API with the
   messaging protocol closest to that used in the legacy v1 API.
See the setupkwargs module for a description of the default pubsub
messaging protocol, defined as 'kwargs'.
Note that once :mod:`pub` has been imported, the messaging protocol
cannot be changed. Also, if migrating an application from 'arg1' to 'kwargs'
style messaging, see :func:`enforceArgName`.
:copyright: Copyright 2006-2009 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''
import pubsubconf
pubsubconf.setVersion(3)
import core
core.setMsgProtocol('arg1')
def enforceArgName(commonName):
    '''This will require that all listeners use the same argument
    name (commonName) as their first parameter. This can be a useful
    first step in transitioning an application that has been using the
    *arg1* protocol to the default *kwargs* protocol; see the docs for
    details. '''
import core
core.setMsgDataArgName(1, commonName)
| agpl-3.0 |
chosener/RunningSheep | cocos2d/download-deps.py | 63 | 12254 | #!/usr/bin/env python
#coding=utf-8
#
# ./download-deps.py
#
# Download Cocos2D-X resources from github (https://github.com/cocos2d/cocos2d-x-3rd-party-libs-bin) and extract from ZIP
#
# Helps prevent repo bloat due to large binary files since they can
# be hosted separately.
#
"""****************************************************************************
Copyright (c) 2014 cocos2d-x.org
Copyright (c) 2014 Chukong Technologies Inc.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************"""
import os.path,zipfile
import shutil
import sys
import traceback
import distutils
import fileinput
import json
from optparse import OptionParser
from time import time
from sys import stdout
from distutils.errors import DistutilsError
from distutils.dir_util import copy_tree, remove_tree
class UnrecognizedFormat(Exception):
def __init__(self, prompt):
self._prompt = prompt
def __str__(self):
return self._prompt
class CocosZipInstaller(object):
def __init__(self, workpath, config_path, version_path, remote_version_key = None):
self._workpath = workpath
self._config_path = config_path
self._version_path = version_path
data = self.load_json_file(config_path)
self._current_version = data["version"]
self._repo_name = data["repo_name"]
self._filename = self._current_version + '.zip'
self._url = data["repo_parent"] + self._repo_name + '/archive/' + self._filename
self._zip_file_size = int(data["zip_file_size"])
# 'v' letter was swallowed by github, so we need to substring it from the 2nd letter
self._extracted_folder_name = os.path.join(self._workpath, self._repo_name + '-' + self._current_version[1:])
try:
data = self.load_json_file(version_path)
if remote_version_key == None:
self._remote_version = data["version"]
else:
self._remote_version = data[remote_version_key]
except:
print("==> version file doesn't exist")
def get_input_value(self, prompt):
ret = raw_input(prompt)
ret.rstrip(" \t")
return ret
def download_file(self):
print("==> Ready to download '%s' from '%s'" % (self._filename, self._url))
import urllib2
try:
u = urllib2.urlopen(self._url)
except urllib2.HTTPError as e:
if e.code == 404:
print("==> Error: Could not find the file from url: '%s'" % (self._url))
print("==> Http request failed, error code: " + str(e.code) + ", reason: " + e.read())
sys.exit(1)
f = open(self._filename, 'wb')
meta = u.info()
content_len = meta.getheaders("Content-Length")
file_size = 0
if content_len and len(content_len) > 0:
file_size = int(content_len[0])
else:
            # The GitHub server may not respond with a header containing
            # `Content-Length`, so the size is hardcoded in the config.
            # When the server doesn't return `Content-Length`, use that value instead.
print("==> WARNING: Couldn't grab the file size from remote, use 'zip_file_size' section in '%s'" % self._config_path)
file_size = self._zip_file_size
print("==> Start to download, please wait ...")
file_size_dl = 0
block_sz = 8192
block_size_per_second = 0
old_time=time()
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
block_size_per_second += len(buffer)
f.write(buffer)
new_time = time()
if (new_time - old_time) > 1:
speed = block_size_per_second / (new_time - old_time) / 1000.0
status = ""
if file_size != 0:
percent = file_size_dl * 100. / file_size
status = r"Downloaded: %6dK / Total: %dK, Percent: %3.2f%%, Speed: %6.2f KB/S " % (file_size_dl / 1000, file_size / 1000, percent, speed)
else:
status = r"Downloaded: %6dK, Speed: %6.2f KB/S " % (file_size_dl / 1000, speed)
status = status + chr(8)*(len(status)+1)
print(status),
sys.stdout.flush()
block_size_per_second = 0
old_time = new_time
print("==> Downloading finished!")
f.close()
def ensure_directory(self, target):
if not os.path.exists(target):
os.mkdir(target)
def unpack_zipfile(self, extract_dir):
"""Unpack zip `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
by ``zipfile.is_zipfile()``).
"""
if not zipfile.is_zipfile(self._filename):
raise UnrecognizedFormat("%s is not a zip file" % (self._filename))
print("==> Extracting files, please wait ...")
z = zipfile.ZipFile(self._filename)
try:
for info in z.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
if name.endswith('/'):
# directory
self.ensure_directory(target)
else:
# file
data = z.read(info.filename)
f = open(target,'wb')
try:
f.write(data)
finally:
f.close()
del data
unix_attributes = info.external_attr >> 16
if unix_attributes:
os.chmod(target, unix_attributes)
finally:
z.close()
print("==> Extraction done!")
def ask_to_delete_downloaded_zip_file(self):
ret = self.get_input_value("==> Whether to delete '%s' file? It may be reused when you execute this script next time! (yes/no): " % self._filename)
ret = ret.strip()
if ret != 'yes' and ret != 'no':
print("==> Invalid answer, please answer 'yes' or 'no'!")
return self.ask_to_delete_downloaded_zip_file()
else:
return True if ret == 'yes' else False
def download_zip_file(self):
if not os.path.isfile(self._filename):
self.download_file()
try:
if not zipfile.is_zipfile(self._filename):
raise UnrecognizedFormat("%s is not a zip file" % (self._filename))
except UnrecognizedFormat as e:
print("==> Unrecognized zip format from your local '%s' file!" % (self._filename))
if os.path.isfile(self._filename):
os.remove(self._filename)
print("==> Download it from internet again, please wait...")
self.download_zip_file()
def need_to_update(self):
if not os.path.isfile(self._version_path):
return True
with open(self._version_path) as data_file:
data = json.load(data_file)
if self._remote_version == self._current_version:
return False
return True
def load_json_file(self, file_path):
if not os.path.isfile(file_path):
raise Exception("Could not find (%s)" % (file_path))
with open(file_path) as data_file:
data = json.load(data_file)
return data
def run(self, folder_for_extracting, remove_downloaded, force_update, download_only):
if not force_update and not self.need_to_update():
print("==> Not need to update!")
return
if os.path.exists(self._extracted_folder_name):
shutil.rmtree(self._extracted_folder_name)
self.download_zip_file()
if not download_only:
self.unpack_zipfile(self._workpath)
print("==> Copying files...")
if not os.path.exists(folder_for_extracting):
os.mkdir(folder_for_extracting)
distutils.dir_util.copy_tree(self._extracted_folder_name, folder_for_extracting)
print("==> Cleaning...")
if os.path.exists(self._extracted_folder_name):
shutil.rmtree(self._extracted_folder_name)
if os.path.isfile(self._filename):
if remove_downloaded != None:
if remove_downloaded == 'yes':
os.remove(self._filename)
elif self.ask_to_delete_downloaded_zip_file():
os.remove(self._filename)
else:
print("==> Download (%s) finish!" % self._filename)
def _check_python_version():
major_ver = sys.version_info[0]
if major_ver > 2:
print ("The python version is %d.%d. But python 2.x is required. (Version 2.7 is well tested)\n"
"Download it here: https://www.python.org/" % (major_ver, sys.version_info[1]))
return False
return True
def main():
workpath = os.path.dirname(os.path.realpath(__file__))
if not _check_python_version():
exit()
parser = OptionParser()
parser.add_option('-r', '--remove-download',
action="store", type="string", dest='remove_downloaded', default=None,
help="Whether to remove downloaded zip file, 'yes' or 'no'")
parser.add_option("-f", "--force-update",
action="store_true", dest="force_update", default=False,
help="Whether to force update the third party libraries")
parser.add_option("-d", "--download-only",
action="store_true", dest="download_only", default=False,
help="Only download zip file of the third party libraries, will not extract it")
(opts, args) = parser.parse_args()
print("=======================================================")
print("==> Prepare to download external libraries!")
external_path = os.path.join(workpath, 'external')
installer = CocosZipInstaller(workpath, os.path.join(workpath, 'external', 'config.json'), os.path.join(workpath, 'external', 'version.json'), "prebuilt_libs_version")
installer.run(external_path, opts.remove_downloaded, opts.force_update, opts.download_only)
print("=======================================================")
print("==> Prepare to download lua runtime binaries")
runtime_path = os.path.join(workpath, 'templates', 'lua-template-runtime', 'runtime')
installer = CocosZipInstaller(workpath, os.path.join(runtime_path, 'config.json'), os.path.join(runtime_path, 'version.json'))
installer.run(runtime_path, opts.remove_downloaded, opts.force_update, opts.download_only)
# -------------- main --------------
if __name__ == '__main__':
try:
main()
except Exception as e:
traceback.print_exc()
sys.exit(1)
| gpl-3.0 |
eldarion/kairios | setup.py | 2 | 3037 | from setuptools import find_packages, setup
VERSION = "3.0.0"
LONG_DESCRIPTION = """
.. image:: http://pinaxproject.com/pinax-design/patches/pinax-calendars.svg
:target: https://pypi.python.org/pypi/pinax-calendars/
===============
Pinax Calendars
===============
.. image:: https://img.shields.io/pypi/v/pinax-calendars.svg
:target: https://pypi.python.org/pypi/pinax-calendars/
\
.. image:: https://img.shields.io/circleci/project/github/pinax/pinax-calendars.svg
:target: https://circleci.com/gh/pinax/pinax-calendars
.. image:: https://img.shields.io/codecov/c/github/pinax/pinax-calendars.svg
:target: https://codecov.io/gh/pinax/pinax-calendars
.. image:: https://img.shields.io/github/contributors/pinax/pinax-calendars.svg
:target: https://github.com/pinax/pinax-calendars/graphs/contributors
.. image:: https://img.shields.io/github/issues-pr/pinax/pinax-calendars.svg
:target: https://github.com/pinax/pinax-calendars/pulls
.. image:: https://img.shields.io/github/issues-pr-closed/pinax/pinax-calendars.svg
:target: https://github.com/pinax/pinax-calendars/pulls?q=is%3Apr+is%3Aclosed
\
.. image:: http://slack.pinaxproject.com/badge.svg
:target: http://slack.pinaxproject.com/
.. image:: https://img.shields.io/badge/license-MIT-blue.svg
:target: https://opensource.org/licenses/MIT/
\
``pinax-calendars`` provides utilities for publishing events as a calendar.
Supported Django and Python Versions
------------------------------------
+-----------------+-----+-----+-----+
| Django / Python | 3.6 | 3.7 | 3.8 |
+=================+=====+=====+=====+
| 2.2 | * | * | * |
+-----------------+-----+-----+-----+
| 3.0 | * | * | * |
+-----------------+-----+-----+-----+
"""
setup(
author="Pinax Team",
author_email="team@pinaxproject.com",
description="Django utilities for publishing events as a calendar",
name="pinax-calendars",
long_description=LONG_DESCRIPTION,
version=VERSION,
url="http://github.com/pinax/pinax-calendars/",
license="MIT",
packages=find_packages(),
package_data={
"pinax.calendars": [
"templates/pinax/calendars/*"
]
},
install_requires=[
"django>=2.2",
"pytz",
],
test_suite="runtests.runtests",
tests_require=[
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
"Topic :: Software Development :: Libraries :: Python Modules",
],
zip_safe=False
)
| bsd-3-clause |
yewang15215/django | django/test/selenium.py | 109 | 3456 | from __future__ import unicode_literals
import sys
import unittest
from contextlib import contextmanager
from django.test import LiveServerTestCase, tag
from django.utils.module_loading import import_string
from django.utils.six import with_metaclass
from django.utils.text import capfirst
class SeleniumTestCaseBase(type(LiveServerTestCase)):
# List of browsers to dynamically create test classes for.
browsers = []
# Sentinel value to differentiate browser-specific instances.
browser = None
def __new__(cls, name, bases, attrs):
"""
Dynamically create new classes and add them to the test module when
multiple browsers specs are provided (e.g. --selenium=firefox,chrome).
"""
test_class = super(SeleniumTestCaseBase, cls).__new__(cls, name, bases, attrs)
# If the test class is either browser-specific or a test base, return it.
if test_class.browser or not any(name.startswith('test') and callable(value) for name, value in attrs.items()):
return test_class
elif test_class.browsers:
# Reuse the created test class to make it browser-specific.
# We can't rename it to include the browser name or create a
# subclass like we do with the remaining browsers as it would
# either duplicate tests or prevent pickling of its instances.
first_browser = test_class.browsers[0]
test_class.browser = first_browser
# Create subclasses for each of the remaining browsers and expose
# them through the test's module namespace.
module = sys.modules[test_class.__module__]
for browser in test_class.browsers[1:]:
browser_test_class = cls.__new__(
cls,
str("%s%s" % (capfirst(browser), name)),
(test_class,),
{'browser': browser, '__module__': test_class.__module__}
)
setattr(module, browser_test_class.__name__, browser_test_class)
return test_class
# If no browsers were specified, skip this class (it'll still be discovered).
return unittest.skip('No browsers specified.')(test_class)
@classmethod
def import_webdriver(cls, browser):
return import_string("selenium.webdriver.%s.webdriver.WebDriver" % browser)
def create_webdriver(self):
return self.import_webdriver(self.browser)()
@tag('selenium')
class SeleniumTestCase(with_metaclass(SeleniumTestCaseBase, LiveServerTestCase)):
implicit_wait = 10
@classmethod
def setUpClass(cls):
cls.selenium = cls.create_webdriver()
cls.selenium.implicitly_wait(cls.implicit_wait)
super(SeleniumTestCase, cls).setUpClass()
@classmethod
def _tearDownClassInternal(cls):
# quit() the WebDriver before attempting to terminate and join the
# single-threaded LiveServerThread to avoid a dead lock if the browser
# kept a connection alive.
if hasattr(cls, 'selenium'):
cls.selenium.quit()
super(SeleniumTestCase, cls)._tearDownClassInternal()
@contextmanager
def disable_implicit_wait(self):
"""Context manager that disables the default implicit wait."""
self.selenium.implicitly_wait(0)
try:
yield
finally:
self.selenium.implicitly_wait(self.implicit_wait)
| bsd-3-clause |
johnkeepmoving/oss-ftp | python27/win32/Lib/site-packages/cffi/setuptools_ext.py | 49 | 6158 | import os
try:
basestring
except NameError:
# Python 3.x
basestring = str
def error(msg):
from distutils.errors import DistutilsSetupError
raise DistutilsSetupError(msg)
def execfile(filename, glob):
# We use execfile() (here rewritten for Python 3) instead of
# __import__() to load the build script. The problem with
# a normal import is that in some packages, the intermediate
# __init__.py files may already try to import the file that
# we are generating.
with open(filename) as f:
src = f.read()
src += '\n' # Python 2.6 compatibility
code = compile(src, filename, 'exec')
exec(code, glob, glob)
def add_cffi_module(dist, mod_spec):
from cffi.api import FFI
if not isinstance(mod_spec, basestring):
error("argument to 'cffi_modules=...' must be a str or a list of str,"
" not %r" % (type(mod_spec).__name__,))
mod_spec = str(mod_spec)
try:
build_file_name, ffi_var_name = mod_spec.split(':')
except ValueError:
error("%r must be of the form 'path/build.py:ffi_variable'" %
(mod_spec,))
if not os.path.exists(build_file_name):
ext = ''
rewritten = build_file_name.replace('.', '/') + '.py'
if os.path.exists(rewritten):
ext = ' (rewrite cffi_modules to [%r])' % (
rewritten + ':' + ffi_var_name,)
error("%r does not name an existing file%s" % (build_file_name, ext))
mod_vars = {'__name__': '__cffi__', '__file__': build_file_name}
execfile(build_file_name, mod_vars)
try:
ffi = mod_vars[ffi_var_name]
except KeyError:
error("%r: object %r not found in module" % (mod_spec,
ffi_var_name))
if not isinstance(ffi, FFI):
ffi = ffi() # maybe it's a function instead of directly an ffi
if not isinstance(ffi, FFI):
error("%r is not an FFI instance (got %r)" % (mod_spec,
type(ffi).__name__))
if not hasattr(ffi, '_assigned_source'):
error("%r: the set_source() method was not called" % (mod_spec,))
module_name, source, source_extension, kwds = ffi._assigned_source
if ffi._windows_unicode:
kwds = kwds.copy()
ffi._apply_windows_unicode(kwds)
if source is None:
_add_py_module(dist, ffi, module_name)
else:
_add_c_module(dist, ffi, module_name, source, source_extension, kwds)
def _add_c_module(dist, ffi, module_name, source, source_extension, kwds):
from distutils.core import Extension
from distutils.command.build_ext import build_ext
from distutils.dir_util import mkpath
from distutils import log
from cffi import recompiler
allsources = ['$PLACEHOLDER']
allsources.extend(kwds.pop('sources', []))
ext = Extension(name=module_name, sources=allsources, **kwds)
def make_mod(tmpdir, pre_run=None):
c_file = os.path.join(tmpdir, module_name + source_extension)
log.info("generating cffi module %r" % c_file)
mkpath(tmpdir)
# a setuptools-only, API-only hook: called with the "ext" and "ffi"
# arguments just before we turn the ffi into C code. To use it,
# subclass the 'distutils.command.build_ext.build_ext' class and
# add a method 'def pre_run(self, ext, ffi)'.
if pre_run is not None:
pre_run(ext, ffi)
updated = recompiler.make_c_source(ffi, module_name, source, c_file)
if not updated:
log.info("already up-to-date")
return c_file
if dist.ext_modules is None:
dist.ext_modules = []
dist.ext_modules.append(ext)
base_class = dist.cmdclass.get('build_ext', build_ext)
class build_ext_make_mod(base_class):
def run(self):
if ext.sources[0] == '$PLACEHOLDER':
pre_run = getattr(self, 'pre_run', None)
ext.sources[0] = make_mod(self.build_temp, pre_run)
base_class.run(self)
dist.cmdclass['build_ext'] = build_ext_make_mod
# NB. multiple runs here will create multiple 'build_ext_make_mod'
# classes. Even in this case the 'build_ext' command should be
# run once; but just in case, the logic above does nothing if
# called again.
def _add_py_module(dist, ffi, module_name):
from distutils.dir_util import mkpath
from distutils.command.build_py import build_py
from distutils.command.build_ext import build_ext
from distutils import log
from cffi import recompiler
def generate_mod(py_file):
log.info("generating cffi module %r" % py_file)
mkpath(os.path.dirname(py_file))
updated = recompiler.make_py_source(ffi, module_name, py_file)
if not updated:
log.info("already up-to-date")
base_class = dist.cmdclass.get('build_py', build_py)
class build_py_make_mod(base_class):
def run(self):
base_class.run(self)
module_path = module_name.split('.')
module_path[-1] += '.py'
generate_mod(os.path.join(self.build_lib, *module_path))
dist.cmdclass['build_py'] = build_py_make_mod
# the following is only for "build_ext -i"
base_class_2 = dist.cmdclass.get('build_ext', build_ext)
class build_ext_make_mod(base_class_2):
def run(self):
base_class_2.run(self)
if self.inplace:
# from get_ext_fullpath() in distutils/command/build_ext.py
module_path = module_name.split('.')
package = '.'.join(module_path[:-1])
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
file_name = module_path[-1] + '.py'
generate_mod(os.path.join(package_dir, file_name))
dist.cmdclass['build_ext'] = build_ext_make_mod
def cffi_modules(dist, attr, value):
assert attr == 'cffi_modules'
if isinstance(value, basestring):
value = [value]
for cffi_module in value:
add_cffi_module(dist, cffi_module)
| mit |
druuu/django | tests/template_tests/test_custom.py | 152 | 20056 | from __future__ import unicode_literals
import os
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import Node
from django.template.library import InvalidTemplateLibrary
from django.test import SimpleTestCase, ignore_warnings
from django.test.utils import extend_sys_path
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from .templatetags import custom, inclusion
from .utils import ROOT
LIBRARIES = {
'custom': 'template_tests.templatetags.custom',
'inclusion': 'template_tests.templatetags.inclusion',
}
class CustomFilterTests(SimpleTestCase):
def test_filter(self):
engine = Engine(libraries=LIBRARIES)
t = engine.from_string("{% load custom %}{{ string|trim:5 }}")
self.assertEqual(
t.render(Context({"string": "abcdefghijklmnopqrstuvwxyz"})),
"abcde"
)
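# The 'trim' filter exercised above lives in
# template_tests/templatetags/custom.py; a minimal sketch of such a filter
# (illustrative, not the exact source):
#
#   @register.filter
#   def trim(value, num):
#       return value[:num]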
class TagTestCase(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(app_dirs=True, libraries=LIBRARIES)
super(TagTestCase, cls).setUpClass()
def verify_tag(self, tag, name):
self.assertEqual(tag.__name__, name)
self.assertEqual(tag.__doc__, 'Expected %s __doc__' % name)
self.assertEqual(tag.__dict__['anything'], 'Expected %s __dict__' % name)
class SimpleTagTests(TagTestCase):
def test_simple_tags(self):
c = Context({'value': 42})
templates = [
('{% load custom %}{% no_params %}', 'no_params - Expected result'),
('{% load custom %}{% one_param 37 %}', 'one_param - Expected result: 37'),
('{% load custom %}{% explicit_no_context 37 %}', 'explicit_no_context - Expected result: 37'),
('{% load custom %}{% no_params_with_context %}',
'no_params_with_context - Expected result (context value: 42)'),
('{% load custom %}{% params_and_context 37 %}',
'params_and_context - Expected result (context value: 42): 37'),
('{% load custom %}{% simple_two_params 37 42 %}', 'simple_two_params - Expected result: 37, 42'),
('{% load custom %}{% simple_one_default 37 %}', 'simple_one_default - Expected result: 37, hi'),
('{% load custom %}{% simple_one_default 37 two="hello" %}',
'simple_one_default - Expected result: 37, hello'),
('{% load custom %}{% simple_one_default one=99 two="hello" %}',
'simple_one_default - Expected result: 99, hello'),
('{% load custom %}{% simple_one_default 37 42 %}',
'simple_one_default - Expected result: 37, 42'),
('{% load custom %}{% simple_unlimited_args 37 %}', 'simple_unlimited_args - Expected result: 37, hi'),
('{% load custom %}{% simple_unlimited_args 37 42 56 89 %}',
'simple_unlimited_args - Expected result: 37, 42, 56, 89'),
('{% load custom %}{% simple_only_unlimited_args %}', 'simple_only_unlimited_args - Expected result: '),
('{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}',
'simple_only_unlimited_args - Expected result: 37, 42, 56, 89'),
('{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
'simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4'),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
for entry in templates:
t = self.engine.from_string("%s as var %%}Result: {{ var }}" % entry[0][0:-2])
self.assertEqual(t.render(c), "Result: %s" % entry[1])
def test_simple_tag_errors(self):
errors = [
("'simple_one_default' received unexpected keyword argument 'three'",
'{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}'),
("'simple_two_params' received too many positional arguments",
'{% load custom %}{% simple_two_params 37 42 56 %}'),
("'simple_one_default' received too many positional arguments",
'{% load custom %}{% simple_one_default 37 42 56 %}'),
("'simple_unlimited_args_kwargs' received some positional argument(s) after some keyword argument(s)",
'{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}'),
("'simple_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
'{% load custom %}{% simple_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string(entry[1])
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string("%s as var %%}" % entry[1][0:-2])
def test_simple_tag_escaping_autoescape_off(self):
c = Context({'name': "Jack & Jill"}, autoescape=False)
t = self.engine.from_string("{% load custom %}{% escape_naive %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_naive_escaping(self):
c = Context({'name': "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_naive %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_explicit_escaping(self):
# Check we don't double escape
c = Context({'name': "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_explicit %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_format_html_escaping(self):
# Check we don't double escape
c = Context({'name': "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_format_html %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(custom.no_params, 'no_params')
self.verify_tag(custom.one_param, 'one_param')
self.verify_tag(custom.explicit_no_context, 'explicit_no_context')
self.verify_tag(custom.no_params_with_context, 'no_params_with_context')
self.verify_tag(custom.params_and_context, 'params_and_context')
self.verify_tag(custom.simple_unlimited_args_kwargs, 'simple_unlimited_args_kwargs')
self.verify_tag(custom.simple_tag_without_context_parameter, 'simple_tag_without_context_parameter')
def test_simple_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'simple_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string('{% load custom %}{% simple_tag_without_context_parameter 123 %}')
class InclusionTagTests(TagTestCase):
def test_inclusion_tags(self):
c = Context({'value': 42})
templates = [
('{% load inclusion %}{% inclusion_no_params %}', 'inclusion_no_params - Expected result\n'),
('{% load inclusion %}{% inclusion_one_param 37 %}', 'inclusion_one_param - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_explicit_no_context 37 %}',
'inclusion_explicit_no_context - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_no_params_with_context %}',
'inclusion_no_params_with_context - Expected result (context value: 42)\n'),
('{% load inclusion %}{% inclusion_params_and_context 37 %}',
'inclusion_params_and_context - Expected result (context value: 42): 37\n'),
('{% load inclusion %}{% inclusion_two_params 37 42 %}',
'inclusion_two_params - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_one_default 37 %}', 'inclusion_one_default - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_one_default 37 two="hello" %}',
'inclusion_one_default - Expected result: 37, hello\n'),
('{% load inclusion %}{% inclusion_one_default one=99 two="hello" %}',
'inclusion_one_default - Expected result: 99, hello\n'),
('{% load inclusion %}{% inclusion_one_default 37 42 %}',
'inclusion_one_default - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_unlimited_args 37 %}',
'inclusion_unlimited_args - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_unlimited_args 37 42 56 89 %}',
'inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_only_unlimited_args %}',
'inclusion_only_unlimited_args - Expected result: \n'),
('{% load inclusion %}{% inclusion_only_unlimited_args 37 42 56 89 %}',
'inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
'inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4\n'),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_errors(self):
errors = [
("'inclusion_one_default' received unexpected keyword argument 'three'",
'{% load inclusion %}{% inclusion_one_default 99 two="hello" three="foo" %}'),
("'inclusion_two_params' received too many positional arguments",
'{% load inclusion %}{% inclusion_two_params 37 42 56 %}'),
("'inclusion_one_default' received too many positional arguments",
'{% load inclusion %}{% inclusion_one_default 37 42 56 %}'),
("'inclusion_one_default' did not receive value(s) for the argument(s): 'one'",
'{% load inclusion %}{% inclusion_one_default %}'),
("'inclusion_unlimited_args' did not receive value(s) for the argument(s): 'one'",
'{% load inclusion %}{% inclusion_unlimited_args %}'),
(
"'inclusion_unlimited_args_kwargs' received some positional argument(s) "
"after some keyword argument(s)",
'{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 eggs="boiled" 56 four=1|add:3 %}',
),
("'inclusion_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
'{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string(entry[1])
def test_include_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'inclusion_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string('{% load inclusion %}{% inclusion_tag_without_context_parameter 123 %}')
def test_inclusion_tags_from_template(self):
c = Context({'value': 42})
templates = [
('{% load inclusion %}{% inclusion_no_params_from_template %}',
'inclusion_no_params_from_template - Expected result\n'),
('{% load inclusion %}{% inclusion_one_param_from_template 37 %}',
'inclusion_one_param_from_template - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_explicit_no_context_from_template 37 %}',
'inclusion_explicit_no_context_from_template - Expected result: 37\n'),
('{% load inclusion %}{% inclusion_no_params_with_context_from_template %}',
'inclusion_no_params_with_context_from_template - Expected result (context value: 42)\n'),
('{% load inclusion %}{% inclusion_params_and_context_from_template 37 %}',
'inclusion_params_and_context_from_template - Expected result (context value: 42): 37\n'),
('{% load inclusion %}{% inclusion_two_params_from_template 37 42 %}',
'inclusion_two_params_from_template - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_one_default_from_template 37 %}',
'inclusion_one_default_from_template - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_one_default_from_template 37 42 %}',
'inclusion_one_default_from_template - Expected result: 37, 42\n'),
('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 %}',
'inclusion_unlimited_args_from_template - Expected result: 37, hi\n'),
('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 42 56 89 %}',
'inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
('{% load inclusion %}{% inclusion_only_unlimited_args_from_template %}',
'inclusion_only_unlimited_args_from_template - Expected result: \n'),
('{% load inclusion %}{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}',
'inclusion_only_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(inclusion.inclusion_no_params, 'inclusion_no_params')
self.verify_tag(inclusion.inclusion_one_param, 'inclusion_one_param')
self.verify_tag(inclusion.inclusion_explicit_no_context, 'inclusion_explicit_no_context')
self.verify_tag(inclusion.inclusion_no_params_with_context, 'inclusion_no_params_with_context')
self.verify_tag(inclusion.inclusion_params_and_context, 'inclusion_params_and_context')
self.verify_tag(inclusion.inclusion_two_params, 'inclusion_two_params')
self.verify_tag(inclusion.inclusion_one_default, 'inclusion_one_default')
self.verify_tag(inclusion.inclusion_unlimited_args, 'inclusion_unlimited_args')
self.verify_tag(inclusion.inclusion_only_unlimited_args, 'inclusion_only_unlimited_args')
self.verify_tag(inclusion.inclusion_tag_without_context_parameter, 'inclusion_tag_without_context_parameter')
self.verify_tag(inclusion.inclusion_tag_use_l10n, 'inclusion_tag_use_l10n')
self.verify_tag(inclusion.inclusion_tag_current_app, 'inclusion_tag_current_app')
self.verify_tag(inclusion.inclusion_unlimited_args_kwargs, 'inclusion_unlimited_args_kwargs')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_15070_current_app(self):
"""
Test that inclusion tag passes down `current_app` of context to the
Context of the included/rendered template as well.
"""
c = Context({})
t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_current_app %}')
self.assertEqual(t.render(c).strip(), 'None')
# That part produces the deprecation warning
c = Context({}, current_app='advanced')
self.assertEqual(t.render(c).strip(), 'advanced')
def test_15070_use_l10n(self):
"""
Test that inclusion tag passes down `use_l10n` of context to the
Context of the included/rendered template as well.
"""
c = Context({})
t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_use_l10n %}')
self.assertEqual(t.render(c).strip(), 'None')
c.use_l10n = True
self.assertEqual(t.render(c).strip(), 'True')
def test_no_render_side_effect(self):
"""
#23441 -- InclusionNode shouldn't modify its nodelist at render time.
"""
engine = Engine(app_dirs=True, libraries=LIBRARIES)
template = engine.from_string('{% load inclusion %}{% inclusion_no_params %}')
count = template.nodelist.get_nodes_by_type(Node)
template.render(Context({}))
self.assertEqual(template.nodelist.get_nodes_by_type(Node), count)
def test_render_context_is_cleared(self):
"""
#24555 -- InclusionNode should push and pop the render_context stack
when rendering. Otherwise, leftover values such as blocks from
extending can interfere with subsequent rendering.
"""
engine = Engine(app_dirs=True, libraries=LIBRARIES)
template = engine.from_string('{% load inclusion %}{% inclusion_extends1 %}{% inclusion_extends2 %}')
self.assertEqual(template.render(Context({})).strip(), 'one\ntwo')
class AssignmentTagTests(TagTestCase):
def test_assignment_tags(self):
c = Context({'value': 42})
t = self.engine.from_string('{% load custom %}{% assignment_no_params as var %}The result is: {{ var }}')
self.assertEqual(t.render(c), 'The result is: assignment_no_params - Expected result')
def test_assignment_tag_registration(self):
# Test that the decorators preserve the decorated function's docstring, name and attributes.
self.verify_tag(custom.assignment_no_params, 'assignment_no_params')
def test_assignment_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'assignment_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string('{% load custom %}{% assignment_tag_without_context_parameter 123 as var %}')
class TemplateTagLoadingTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.egg_dir = os.path.join(ROOT, 'eggs')
super(TemplateTagLoadingTests, cls).setUpClass()
def test_load_error(self):
msg = (
"Invalid template library specified. ImportError raised when "
"trying to load 'template_tests.broken_tag': cannot import name "
"'?Xtemplate'?"
)
with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg):
Engine(libraries={
'broken_tag': 'template_tests.broken_tag',
})
def test_load_error_egg(self):
egg_name = '%s/tagsegg.egg' % self.egg_dir
msg = (
"Invalid template library specified. ImportError raised when "
"trying to load 'tagsegg.templatetags.broken_egg': cannot "
"import name '?Xtemplate'?"
)
with extend_sys_path(egg_name):
with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg):
Engine(libraries={
'broken_egg': 'tagsegg.templatetags.broken_egg',
})
def test_load_working_egg(self):
ttext = "{% load working_egg %}"
egg_name = '%s/tagsegg.egg' % self.egg_dir
with extend_sys_path(egg_name):
engine = Engine(libraries={
'working_egg': 'tagsegg.templatetags.working_egg',
})
engine.from_string(ttext)
| bsd-3-clause |
sudheesh001/oh-mainline | vendor/packages/twisted/doc/conch/examples/sshsimpleserver.py | 18 | 3772 | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.cred import portal, checkers
from twisted.conch import error, avatar
from twisted.conch.checkers import SSHPublicKeyDatabase
from twisted.conch.ssh import factory, userauth, connection, keys, session
from twisted.internet import reactor, protocol, defer
from twisted.python import log
from zope.interface import implements
import sys
log.startLogging(sys.stderr)
"""
Example of running another protocol over an SSH channel.
Log in with username "user" and password "password".
"""
class ExampleAvatar(avatar.ConchUser):
def __init__(self, username):
avatar.ConchUser.__init__(self)
self.username = username
self.channelLookup.update({'session':session.SSHSession})
class ExampleRealm:
implements(portal.IRealm)
def requestAvatar(self, avatarId, mind, *interfaces):
return interfaces[0], ExampleAvatar(avatarId), lambda: None
class EchoProtocol(protocol.Protocol):
"""this is our example protocol that we will run over SSH
"""
def dataReceived(self, data):
if data == '\r':
data = '\r\n'
elif data == '\x03': #^C
self.transport.loseConnection()
return
self.transport.write(data)
publicKey = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJSkbh/C+BR3utDS555mV'
privateKey = """-----BEGIN RSA PRIVATE KEY-----
MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW
4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw
vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb
Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1
xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8
PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2
gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu
DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML
pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP
EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg==
-----END RSA PRIVATE KEY-----"""
class InMemoryPublicKeyChecker(SSHPublicKeyDatabase):
def checkKey(self, credentials):
return credentials.username == 'user' and \
keys.Key.fromString(data=publicKey).blob() == credentials.blob
class ExampleSession:
def __init__(self, avatar):
"""
We don't use it, but the adapter is passed the avatar as its first
argument.
"""
def getPty(self, term, windowSize, attrs):
pass
def execCommand(self, proto, cmd):
raise Exception("no executing commands")
def openShell(self, trans):
ep = EchoProtocol()
ep.makeConnection(trans)
trans.makeConnection(session.wrapProtocol(ep))
def eofReceived(self):
pass
def closed(self):
pass
from twisted.python import components
components.registerAdapter(ExampleSession, ExampleAvatar, session.ISession)
class ExampleFactory(factory.SSHFactory):
publicKeys = {
'ssh-rsa': keys.Key.fromString(data=publicKey)
}
privateKeys = {
'ssh-rsa': keys.Key.fromString(data=privateKey)
}
services = {
'ssh-userauth': userauth.SSHUserAuthServer,
'ssh-connection': connection.SSHConnection
}
portal = portal.Portal(ExampleRealm())
passwdDB = checkers.InMemoryUsernamePasswordDatabaseDontUse()
passwdDB.addUser('user', 'password')
portal.registerChecker(passwdDB)
portal.registerChecker(InMemoryPublicKeyChecker())
ExampleFactory.portal = portal
if __name__ == '__main__':
reactor.listenTCP(5022, ExampleFactory())
reactor.run()
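# Editor's sketch (not part of the original example): with the server
# running, it can be exercised from a shell with a stock OpenSSH client:
#
#     ssh -p 5022 user@localhost
#
# Password auth uses "user"/"password" as registered above; typed
# characters are echoed back by EchoProtocol, and Ctrl-C ('\x03') closes
# the channel, as handled in its dataReceived method.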
| agpl-3.0 |
Punchcard123/DMX-AP | Tools/autotest/param_metadata/htmlemit.py | 62 | 2986 | #!/usr/bin/env python
import re
from param import *
from emit import Emit
import cgi
# Emit docs in a form acceptable to the APM wordpress docs site
class HtmlEmit(Emit):
def __init__(self):
html_fname = 'Parameters.html'
self.f = open(html_fname, mode='w')
self.preamble = '''<!-- Dynamically generated list of documented parameters
This page was generated using Tools/autotest/param_metadata/param_parse.py
DO NOT EDIT
-->
<p style="text-align: right"><a title="Table of Contents" href="/wiki/table-of-contents/"><span style="font-size: medium"><strong>Reference Manual Table of Contents</strong></span></a></p>
<h3 style="text-align: center">Complete Parameter List</h3>
<hr />
<p>This is a complete list of the parameters which can be set via the MAVLink protocol in the EEPROM of your APM to control vehicle behaviour. This list is automatically generated from the latest ardupilot source code, and so may contain parameters which are not yet in the stable released versions of the code.</p>
<!-- add auto-generated table of contents with "Table of Contents Plus" plugin -->
[toc exclude="Complete Parameter List"]
'''
self.t = ''
def escape(self, s):
s = s.replace(' ', '-')
s = s.replace(':', '-')
s = s.replace('(', '')
s = s.replace(')', '')
return s
def close(self):
self.f.write(self.preamble)
self.f.write(self.t)
self.f.close()
def start_libraries(self):
self.t += '\n\n<h1>Library Parameters</h1>\n\n'
def emit(self, g, f):
tag = '%s Parameters' % g.name
t = '\n\n<h1>%s</h1>\n' % tag
for param in g.params:
if not hasattr(param, 'DisplayName') or not hasattr(param, 'Description'):
continue
d = param.__dict__
tag = '%s (%s)' % (param.DisplayName, param.name)
t += '\n\n<h2>%s</h2>' % tag
if d.get('User',None) == 'Advanced':
t += '<em>Note: This parameter is for advanced users</em><br>'
t += "\n\n<p>%s</p>\n" % cgi.escape(param.Description)
t += "<ul>\n"
for field in param.__dict__.keys():
if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]):
values = (param.__dict__[field]).split(',')
t += "<table><th>Value</th><th>Meaning</th>\n"
for value in values:
v = value.split(':')
t += "<tr><td>%s</td><td>%s</td></tr>\n" % (v[0], v[1])
t += "</table>\n"
else:
t += "<li>%s: %s</li>\n" % (field, cgi.escape(param.__dict__[field]))
t += "</ul>\n"
self.t += t
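# Editor's sketch (illustrative; the real driver is param_parse.py, and
# `group` here stands for a parsed parameter group, which is an assumption
# about its interface):
#
#     emitter = HtmlEmit()
#     emitter.emit(group, None)  # appends one <h1> section for the group
#     emitter.start_libraries()  # inserts the "Library Parameters" heading
#     emitter.close()            # writes preamble + body to Parameters.html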
| gpl-3.0 |
wfxiang08/changes | changes/api/diff_build_retry.py | 2 | 3554 | import logging
import uuid
from sqlalchemy.orm import joinedload, subqueryload_all
from changes.api.base import APIView, error
from changes.api.build_index import create_build, get_build_plans
from changes.constants import Cause, Result, Status
from changes.models import (
Build, PhabricatorDiff, Project, ProjectConfigError, ProjectStatus,
ProjectOptionsHelper
)
from changes.utils.diff_parser import DiffParser
from changes.utils.project_trigger import files_changed_should_trigger_project
from changes.vcs.base import InvalidDiffError
class DiffBuildRetryAPIView(APIView):
def post(self, diff_id):
"""
        Ask Changes to restart all builds for this diff. The response is the
        list of newly created builds.
"""
diff = self._get_diff_by_id(diff_id)
if not diff:
return error("Diff with ID %s does not exist." % (diff_id,))
diff_parser = DiffParser(diff.source.patch.diff)
files_changed = diff_parser.get_changed_files()
try:
projects = self._get_projects_for_diff(diff, files_changed)
except InvalidDiffError:
return error('Patch does not apply')
except ProjectConfigError:
return error('Project config is not in a valid format.')
collection_id = uuid.uuid4()
builds = self._get_builds_for_diff(diff)
new_builds = []
for project in projects:
builds_for_project = [x for x in builds if x.project_id == project.id]
if not builds_for_project:
logging.warning('Project with id %s does not have a build.', project.id)
continue
build = max(builds_for_project, key=lambda x: x.number)
if build.status is not Status.finished:
continue
if build.result is Result.passed:
continue
new_build = create_build(
project=project,
collection_id=collection_id,
label=build.label,
target=build.target,
message=build.message,
author=build.author,
source=diff.source,
cause=Cause.retry,
)
new_builds.append(new_build)
return self.respond(new_builds)
def _get_diff_by_id(self, diff_id):
diffs = list(PhabricatorDiff.query.options(
joinedload('source').joinedload('patch')
).filter(
PhabricatorDiff.diff_id == diff_id
))
if not diffs:
return None
assert len(diffs) == 1
return diffs[0]
def _get_projects_for_diff(self, diff, files_changed):
projects = list(Project.query.options(
subqueryload_all('plans'),
).filter(
Project.status == ProjectStatus.active,
Project.repository_id == diff.source.repository_id,
))
project_options = ProjectOptionsHelper.get_options(projects, ['build.file-whitelist', 'phabricator.diff-trigger'])
projects = [
x for x in projects
if get_build_plans(x) and
project_options[x.id].get('phabricator.diff-trigger', '1') == '1' and
files_changed_should_trigger_project(files_changed, x, project_options[x.id], diff.source.revision_sha, diff=diff.source.patch.diff)
]
return projects
def _get_builds_for_diff(self, diff):
builds = list(Build.query.filter(
Build.source_id == diff.source.id
))
return builds
| apache-2.0 |
neilhan/tensorflow | tensorflow/python/kernel_tests/spacetobatch_op_test.py | 21 | 24519 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpaceToBatch and BatchToSpace ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_array_ops
def space_to_batch_direct(input_array, block_shape, paddings):
"""Direct Python implementation of space-to-batch conversion.
This is used for tests only.
Args:
input_array: N-D array
block_shape: 1-D array of shape [num_block_dims].
paddings: 2-D array of shape [num_block_dims, 2].
Returns:
Converted tensor.
"""
input_array = np.array(input_array)
block_shape = np.array(block_shape)
num_block_dims = len(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
padded = np.pad(input_array,
pad_width=([[0, 0]] + list(paddings) + [[0, 0]] *
(input_array.ndim - 1 - num_block_dims)),
mode="constant")
reshaped_padded_shape = [input_array.shape[0]]
output_shape = [input_array.shape[0] * np.prod(block_shape)]
for block_dim, block_shape_value in enumerate(block_shape):
reduced_size = padded.shape[block_dim + 1] // block_shape_value
reshaped_padded_shape.append(reduced_size)
output_shape.append(reduced_size)
reshaped_padded_shape.append(block_shape_value)
reshaped_padded_shape.extend(input_array.shape[num_block_dims + 1:])
output_shape.extend(input_array.shape[num_block_dims + 1:])
reshaped_padded = padded.reshape(reshaped_padded_shape)
permuted_reshaped_padded = np.transpose(reshaped_padded, (
list(np.arange(num_block_dims) * 2 + 2) + [0] +
list(np.arange(num_block_dims) * 2 + 1) + list(np.arange(
input_array.ndim - num_block_dims - 1) + 1 + num_block_dims * 2)))
return permuted_reshaped_padded.reshape(output_shape)
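# Editor's sketch (not part of the original test suite): a minimal,
# hand-runnable check of space_to_batch_direct against the documented
# layout, using the same [1, 2, 2, 1] input as testSmallInput2x2 below.
def _space_to_batch_direct_example():
  x = np.arange(1, 5).reshape([1, 2, 2, 1])
  out = space_to_batch_direct(x, block_shape=[2, 2],
                              paddings=[[0, 0], [0, 0]])
  # The single 2x2 spatial block moves into the batch dimension.
  assert out.shape == (4, 1, 1, 1)
  assert out.flatten().tolist() == [1, 2, 3, 4]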
class PythonOpImpl(object):
@staticmethod
def space_to_batch(*args, **kwargs):
return tf.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
return tf.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
@staticmethod
def space_to_batch(*args, **kwargs):
return gen_array_ops._space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
return gen_array_ops._batch_to_space(*args, **kwargs)
class SpaceToBatchTest(tf.test.TestCase, PythonOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the Python compatibility wrapper that forwards to space_to_batch_nd.
"""
def _testPad(self, inputs, paddings, block_size, outputs):
with self.test_session(use_gpu=True):
# outputs = space_to_batch(inputs)
x_tf = self.space_to_batch(
tf.to_float(inputs),
paddings, block_size=block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = self.batch_to_space(
tf.to_float(outputs),
paddings, block_size=block_size)
self.assertAllEqual(x_tf.eval(), inputs)
def _testOne(self, inputs, block_size, outputs):
paddings = np.zeros((2, 2), dtype=np.int32)
self._testPad(inputs, paddings, block_size, outputs)
# [1, 2, 2, 1] <-> [4, 1, 1, 1]
def testSmallInput2x2(self):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
self._testOne(x_np, block_size, x_out)
# [1, 2, 2, 1] <-> [1, 3, 3, 1] (padding) <-> [9, 1, 1, 1]
def testSmallInput2x2Pad1x0(self):
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.array([[1, 0], [1, 0]], dtype=np.int32)
block_size = 3
x_out = [[[[0]]], [[[0]]], [[[0]]],
[[[0]]], [[[1]]], [[[2]]],
[[[0]]], [[[3]]], [[[4]]]]
self._testPad(x_np, paddings, block_size, x_out)
# Test with depth larger than 1.
# [1, 2, 2, 3] <-> [4, 1, 1, 3]
def testDepthInput2x2(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Test for larger input dimensions.
# [1, 4, 4, 1] <-> [4, 2, 2, 1]
def testLargerInput2x2(self):
x_np = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Test with batch larger than 1.
# [2, 2, 4, 1] <-> [8, 1, 2, 1]
def testBatchInput2x2(self):
x_np = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]],
[[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input spatial dimensions AND batch larger than 1, to ensure
# that elements are correctly laid out spatially and properly interleaved
# along the batch dimension.
# [2, 4, 4, 1] <-> [8, 2, 2, 1]
def testLargerInputBatch2x2(self):
x_np = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]],
[[[17], [18], [19], [20]],
[[21], [22], [23], [24]],
[[25], [26], [27], [28]],
[[29], [30], [31], [32]]]]
x_out = [[[[1], [3]], [[9], [11]]],
[[[17], [19]], [[25], [27]]],
[[[2], [4]], [[10], [12]]],
[[[18], [20]], [[26], [28]]],
[[[5], [7]], [[13], [15]]],
[[[21], [23]], [[29], [31]]],
[[[6], [8]], [[14], [16]]],
[[[22], [24]], [[30], [32]]]]
block_size = 2
self._testOne(x_np, block_size, x_out)
class SpaceToBatchCppTest(SpaceToBatchTest, CppOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the C++ ops.
"""
pass
class SpaceToBatchNDTest(tf.test.TestCase):
"""Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops."""
def _testPad(self, inputs, block_shape, paddings, outputs):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
# outputs = space_to_batch(inputs)
x_tf = tf.space_to_batch_nd(tf.to_float(inputs), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = tf.batch_to_space_nd(tf.to_float(outputs), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), inputs)
def _testDirect(self, input_shape, block_shape, paddings):
inputs = np.arange(np.prod(input_shape), dtype=np.float32)
inputs = inputs.reshape(input_shape)
self._testPad(inputs, block_shape, paddings,
space_to_batch_direct(inputs, block_shape, paddings))
def testZeroBlockDimsZeroRemainingDims(self):
self._testPad(inputs=[1, 2],
block_shape=[],
paddings=[],
outputs=[1, 2],)
def testZeroBlockDimsOneRemainingDim(self):
self._testPad(inputs=[[1, 2], [3, 4]],
block_shape=[],
paddings=[],
outputs=[[1, 2], [3, 4]])
# Same thing, but with a no-op block dim.
self._testPad(inputs=[[1, 2], [3, 4]],
block_shape=[1],
paddings=[[0, 0]],
outputs=[[1, 2], [3, 4]])
def testZeroBlockDimsTwoRemainingDims(self):
self._testPad(inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[],
paddings=[],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with a no-op block dim.
self._testPad(inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[1],
paddings=[[0, 0]],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# Same thing, but with two no-op block dims.
self._testPad(inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
block_shape=[1, 1],
paddings=[[0, 0], [0, 0]],
outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
def testOneBlockDimZeroRemainingDims(self):
self._testPad(inputs=[[1, 2, 3], [4, 5, 6]],
block_shape=[2],
paddings=[1, 0],
outputs=[[0, 2], [0, 5], [1, 3], [4, 6]])
def testOneBlockDimOneRemainingDim(self):
self._testPad(
inputs=[[[1, 11], [2, 21], [3, 31]], [[4, 41], [5, 51], [6, 61]]],
block_shape=[2],
paddings=[1, 0],
outputs=[[[0, 0], [2, 21]], [[0, 0], [5, 51]], [[1, 11], [3, 31]],
[[4, 41], [6, 61]]])
def testDirect(self):
# Test with zero-size remaining dimension.
self._testDirect(input_shape=[3, 1, 2, 0],
block_shape=[3],
paddings=[[0, 2]])
# Test with zero-size blocked dimension.
self._testDirect(input_shape=[3, 0, 2, 5],
block_shape=[3],
paddings=[[0, 0]])
# Test with padding up from zero size.
self._testDirect(input_shape=[3, 0, 2, 5],
block_shape=[3],
paddings=[[1, 2]])
self._testDirect(input_shape=[3, 3, 4, 5, 2],
block_shape=[3, 4, 2],
paddings=[[1, 2], [0, 0], [3, 0]])
self._testDirect(input_shape=[3, 3, 4, 5, 2],
block_shape=[3, 4, 2, 2],
paddings=[[1, 2], [0, 0], [3, 0], [0, 0]])
self._testDirect(input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
block_shape=[1, 1, 3, 4, 2, 2],
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]])
self._testDirect(
input_shape=[3, 2, 2, 3, 4, 5, 2, 5],
block_shape=[1, 1, 3, 4, 2, 2, 1],
paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0], [0, 0]])
class SpaceToBatchSpaceToDepth(tf.test.TestCase, PythonOpImpl):
# Verifies that: space_to_batch(x) = transpose(space_to_depth(transpose(x)))
def testSpaceToDepthTranspose(self):
x = np.arange(5 * 10 * 16 * 7, dtype=np.float32).reshape([5, 10, 16, 7])
block_size = 2
paddings = np.zeros((2, 2), dtype=np.int32)
y1 = self.space_to_batch(x, paddings, block_size=block_size)
y2 = tf.transpose(
tf.space_to_depth(
tf.transpose(x, [3, 1, 2, 0]),
block_size=block_size), [3, 1, 2, 0])
with self.test_session(use_gpu=True):
self.assertAllEqual(y1.eval(), y2.eval())
class SpaceToBatchSpaceToDepthCpp(SpaceToBatchSpaceToDepth, CppOpImpl):
pass
class SpaceToBatchErrorHandlingTest(tf.test.TestCase, PythonOpImpl):
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 2
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 0
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 1
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 10
with self.assertRaises(ValueError):
out_tf = self.space_to_batch(x_np, paddings, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.zeros((2, 2), dtype=np.int32)
block_size = 3
with self.assertRaises(ValueError):
_ = self.space_to_batch(x_np, paddings, block_size)
def testUnknownShape(self):
t = self.space_to_batch(
tf.placeholder(tf.float32),
tf.placeholder(tf.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class SpaceToBatchErrorHandlingCppTest(SpaceToBatchErrorHandlingTest,
CppOpImpl):
pass
class SpaceToBatchNDErrorHandlingTest(tf.test.TestCase):
def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes known at graph construction time.
with self.assertRaises(error):
_ = tf.space_to_batch_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)
def _testDynamicShape(self, input_shape, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes unknown at graph construction time.
input_placeholder = tf.placeholder(tf.float32)
block_shape_placeholder = tf.placeholder(tf.int32, shape=block_shape.shape)
paddings_placeholder = tf.placeholder(tf.int32)
t = tf.space_to_batch_nd(input_placeholder, block_shape_placeholder,
paddings_placeholder)
with self.assertRaises(ValueError):
_ = t.eval({input_placeholder: np.zeros(input_shape, np.float32),
block_shape_placeholder: block_shape,
paddings_placeholder: paddings})
def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
self._testDynamicShape(input_shape, block_shape, paddings)
def testBlockSize0(self):
# The block size is 0.
self._testShape([1, 2, 2], [0, 2], [[0, 0], [0, 0]], ValueError)
def testBlockSizeNegative(self):
self._testShape([1, 2, 2], [-1, 2], [[0, 0], [0, 0]], ValueError)
def testNegativePadding(self):
# The padding is negative.
self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError)
def testBlockSizeNotDivisible(self):
# The padded size is not divisible by the block size.
self._testShape([1, 2, 3, 1], [3, 3], [[0, 0], [0, 0]], ValueError)
def testBlockDimsMismatch(self):
# Shape of block_shape does not match shape of paddings.
self._testStaticShape([1, 3, 3, 1], [3, 3], [[0, 0]], ValueError)
def testUnknown(self):
# Verify that input shape and paddings shape can be unknown.
_ = tf.space_to_batch_nd(
tf.placeholder(tf.float32),
tf.placeholder(tf.int32, shape=(2,)),
tf.placeholder(tf.int32))
# Only number of input dimensions is known.
t = tf.space_to_batch_nd(
tf.placeholder(tf.float32, shape=(None, None, None, None)),
tf.placeholder(tf.int32, shape=(2,)),
tf.placeholder(tf.int32))
self.assertEqual(4, t.get_shape().ndims)
# Dimensions are partially known.
t = tf.space_to_batch_nd(
tf.placeholder(tf.float32, shape=(None, None, None, 2)),
tf.placeholder(tf.int32, shape=(2,)),
tf.placeholder(tf.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = tf.space_to_batch_nd(
tf.placeholder(tf.float32, shape=(3, None, None, 2)), [2, 3],
tf.placeholder(tf.int32))
self.assertEqual([3 * 2 * 3, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = tf.space_to_batch_nd(
tf.placeholder(tf.float32, shape=(3, None, 2, 2)), [2, 3],
[[1, 1], [0, 1]])
self.assertEqual([3 * 2 * 3, None, 1, 2], t.get_shape().as_list())
# Dimensions are fully known.
t = tf.space_to_batch_nd(
tf.placeholder(tf.float32, shape=(3, 2, 3, 2)), [2, 3],
[[1, 1], [0, 0]])
self.assertEqual([3 * 2 * 3, 2, 1, 2], t.get_shape().as_list())
class SpaceToBatchGradientTest(tf.test.TestCase, PythonOpImpl):
# Check the gradients.
def _checkGrad(self, x, paddings, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
tf_x = tf.convert_to_tensor(x)
tf_y = self.space_to_batch(tf_x, paddings, block_size)
epsilon = 1e-5
      x_jacob_t, x_jacob_n = tf.test.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for space_to_batch of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size, pad_beg, pad_end):
block_size_sq = block_size * block_size
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
paddings = np.array([[pad_beg, pad_end], [pad_beg, pad_end]],
dtype=np.int32)
self._checkGrad(x, paddings, block_size)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor with the Cartesian product of the dimensions.
def testSmall(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
def testSmall2(self):
block_size = 2
pad_beg = 0
pad_end = 0
self._compare(2, 4, 3, 2, block_size, pad_beg, pad_end)
def testSmallPad1x1(self):
block_size = 2
pad_beg = 1
pad_end = 1
self._compare(1, 2, 3, 5, block_size, pad_beg, pad_end)
class SpaceToBatchGradientCppTest(SpaceToBatchGradientTest, CppOpImpl):
pass
class SpaceToBatchNDGradientTest(tf.test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
with self.test_session():
tf_x = tf.convert_to_tensor(x)
tf_y = tf.space_to_batch_nd(tf_x, block_shape, paddings)
epsilon = 1e-5
      x_jacob_t, x_jacob_n = tf.test.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
def _compare(self, input_shape, block_shape, paddings):
x = np.random.normal(
0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
self._checkGrad(x, block_shape, paddings)
  # Don't use very large numbers as dimensions here, as the result is a
  # tensor with the Cartesian product of the dimensions.
def testSmall(self):
self._compare([1, 4, 6, 5], [2, 2], [[0, 0], [0, 0]])
def testSmall2(self):
self._compare([2, 8, 6, 2], [2, 2], [[0, 0], [0, 0]])
def testSmallPad1(self):
self._compare([2, 4, 6, 2], [2, 2], [[1, 1], [1, 1]])
def testSmallPadThreeBlockDims(self):
self._compare([2, 2, 4, 3, 2], [2, 2, 2], [[1, 1], [1, 1], [1, 0]])
class RequiredSpaceToBatchPaddingsTest(tf.test.TestCase):
def _checkProperties(self, input_shape, block_shape, base_paddings, paddings,
crops):
"""Checks that `paddings` and `crops` satisfy invariants."""
num_block_dims = len(block_shape)
self.assertEqual(len(input_shape), num_block_dims)
if base_paddings is None:
base_paddings = np.zeros((num_block_dims, 2), np.int32)
self.assertEqual(base_paddings.shape, (num_block_dims, 2))
self.assertEqual(paddings.shape, (num_block_dims, 2))
self.assertEqual(crops.shape, (num_block_dims, 2))
for i in range(num_block_dims):
self.assertEqual(paddings[i, 0], base_paddings[i, 0])
self.assertLessEqual(0, paddings[i, 1] - base_paddings[i, 1])
self.assertLess(paddings[i, 1] - base_paddings[i, 1], block_shape[i])
self.assertEqual((input_shape[i] + paddings[i, 0] + paddings[i, 1]) %
block_shape[i], 0)
self.assertEqual(crops[i, 0], 0)
self.assertEqual(crops[i, 1], paddings[i, 1] - base_paddings[i, 1])
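  # Editor's note (worked instance of the invariants above; not in the
  # original file): with input_shape=[1], block_shape=[2] and no
  # base_paddings, the padded size must reach a multiple of 2, so the op
  # returns paddings == [[0, 1]] and crops == [[0, 1]] -- the crop undoes
  # the extra end padding.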
def _test(self, input_shape, block_shape, base_paddings):
input_shape = np.array(input_shape)
block_shape = np.array(block_shape)
if base_paddings is not None:
base_paddings = np.array(base_paddings)
# Check with constants.
paddings, crops = tf.required_space_to_batch_paddings(
input_shape, block_shape, base_paddings)
paddings_const = tensor_util.constant_value(paddings)
crops_const = tensor_util.constant_value(crops)
self.assertIsNotNone(paddings_const)
self.assertIsNotNone(crops_const)
self._checkProperties(input_shape, block_shape, base_paddings,
paddings_const, crops_const)
# Check with non-constants.
assignments = {}
input_shape_placeholder = tf.placeholder(tf.int32)
assignments[input_shape_placeholder] = input_shape
block_shape_placeholder = tf.placeholder(tf.int32, [len(block_shape)])
assignments[block_shape_placeholder] = block_shape
if base_paddings is not None:
base_paddings_placeholder = tf.placeholder(tf.int32,
[len(block_shape), 2])
assignments[base_paddings_placeholder] = base_paddings
else:
base_paddings_placeholder = None
t_paddings, t_crops = tf.required_space_to_batch_paddings(
input_shape_placeholder, block_shape_placeholder,
base_paddings_placeholder)
with self.test_session():
paddings_result = t_paddings.eval(assignments)
crops_result = t_crops.eval(assignments)
self.assertAllEqual(paddings_result, paddings_const)
self.assertAllEqual(crops_result, crops_const)
def testSimple(self):
self._test(input_shape=np.zeros((0,), np.int32),
block_shape=np.zeros((0,), np.int32),
base_paddings=None)
self._test(input_shape=np.zeros((0,), np.int32),
block_shape=np.zeros((0,), np.int32),
base_paddings=np.zeros((0, 2), np.int32))
self._test(input_shape=[1], block_shape=[2], base_paddings=None)
self._test(input_shape=[1], block_shape=[2], base_paddings=[[1, 0]])
self._test(input_shape=[3], block_shape=[1], base_paddings=[[1, 2]])
self._test(input_shape=[1], block_shape=[2], base_paddings=[[2, 3]])
self._test(input_shape=[4, 5], block_shape=[3, 2], base_paddings=None)
self._test(input_shape=[4, 5],
block_shape=[3, 2],
base_paddings=[[0, 0], [0, 1]])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
rjungbeck/rasterizer | mupdf12.py | 1 | 2411 | from mupdfbase import MuPdfBase, Matrix, Rect, BBox
from ctypes import cdll,c_float, c_int, c_void_p, Structure, c_char_p,POINTER
FZ_STORE_UNLIMITED=0
class MuPdf(MuPdfBase):
def __init__(self):
self.dll=cdll.libmupdf
self.dll.fz_bound_page.argtypes=[c_void_p, c_void_p, POINTER(Rect)]
self.dll.fz_bound_page.restype=POINTER(Rect)
self.dll.fz_new_pixmap_with_bbox.argtypes=[c_void_p, c_void_p, POINTER(BBox)]
self.dll.fz_new_pixmap_with_bbox.restype=c_void_p
self.dll.fz_run_page.argtypes=[c_void_p, c_void_p, c_void_p, POINTER(Matrix), c_void_p]
self.dll.fz_run_page.restype=None
self.dll.fz_write_pam.argtypes=[c_void_p, c_void_p, c_char_p, c_int]
self.dll.fz_write_pam.restype=None
self.dll.fz_write_pbm.argtypes=[c_void_p, c_void_p, c_char_p]
self.dll.fz_write_pbm.restype=None
self.dll.fz_count_pages.argtypes=[c_void_p]
self.dll.fz_count_pages.restype=c_int
self.dll.fz_open_document_with_stream.argtypes=[c_void_p, c_char_p, c_void_p]
self.dll.fz_open_document_with_stream.restype=c_void_p
self.dll.fz_close_document.argtypes=[c_void_p]
self.dll.fz_close_document.restype=None
self.dll.fz_free_page.argtypes=[c_void_p, c_void_p]
self.dll.fz_free_page.restype=None
self.dll.fz_find_device_colorspace.argtypes=[c_void_p, c_char_p]
self.dll.fz_find_device_colorspace.restype=c_void_p
MuPdfBase.__init__(self)
def getSize(self):
rect=Rect()
self.dll.fz_bound_page(self.doc, self.page,rect)
return rect.x0, rect.y0, rect.x1, rect.y1
def getPageCount(self):
return self.dll.fz_count_pages(self.doc)
def loadPage(self, num):
self.page=self.dll.fz_load_page(self.doc, num-1)
def runPage(self, dev, transform):
self.dll.fz_run_page(self.doc, self.page, dev, transform, None)
def freePage(self):
self.dll.fz_free_page(self.doc, self.page)
self.page=None
def loadDocument(self, context, stream):
self.doc=self.dll.fz_open_document_with_stream(self.context, "application/pdf", self.stream)
def closeDocument(self):
if self.doc:
self.dll.fz_close_document(self.doc)
self.doc=None
def findColorspace(self, colorSpace):
return self.dll.fz_find_device_colorspace(self.context, colorSpace)
def setContext(self):
self.context=self.dll.fz_new_context(None, None, FZ_STORE_UNLIMITED)
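# Editor's sketch (illustrative only; MuPdfBase is assumed to open the
# stream and call loadDocument/setContext): typical per-page use of this
# wrapper would be
#
#     pdf.loadPage(1)                 # 1-based here; fz_load_page is 0-based
#     x0, y0, x1, y1 = pdf.getSize()  # page bounds in points
#     pdf.freePage()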
| agpl-3.0 |
philippeowagner/django-hvad | hvad/tests/queryset_override.py | 6 | 13863 | # -*- coding: utf-8 -*-
from django.utils import translation
from hvad.test_utils.data import QONORMAL
from hvad.test_utils.testcase import HvadTestCase
from hvad.test_utils.project.app.models import QONormal, QOSimpleRelated, QOMany
from hvad.test_utils.fixtures import QONormalFixture
from hvad.manager import TranslationQueryset
class BasicTests(HvadTestCase):
def test_queryset_class(self):
self.assertIsInstance(QONormal.objects.all(), TranslationQueryset)
self.assertIsInstance(QONormal.objects.language(), TranslationQueryset)
self.assertNotIsInstance(QONormal.objects.untranslated(), TranslationQueryset)
self.assertIs(QONormal._default_manager, QONormal.objects)
self.assertIsInstance(QOSimpleRelated.objects.all(), TranslationQueryset)
self.assertIsInstance(QOSimpleRelated.objects.language(), TranslationQueryset)
self.assertNotIsInstance(QOSimpleRelated.objects.untranslated(), TranslationQueryset)
self.assertIs(QOSimpleRelated._default_manager, QOSimpleRelated.objects)
self.assertIsInstance(QOMany.objects.all(), TranslationQueryset)
self.assertIsInstance(QOMany.objects.language(), TranslationQueryset)
self.assertNotIsInstance(QOMany.objects.untranslated(), TranslationQueryset)
self.assertIs(QOMany._default_manager, QOMany.objects)
class FilterTests(HvadTestCase, QONormalFixture):
""" Basic filter tests - should work without issue as manager is just
as pass-through in those cases
"""
qonormal_count = 2
def test_simple_filter(self):
with translation.override('en'):
with self.assertNumQueries(2):
qs = QONormal.objects.filter(shared_field__contains='2')
self.assertEqual(qs.count(), 1)
obj = qs[0]
with self.assertNumQueries(0):
self.assertEqual(obj.shared_field, QONORMAL[2].shared_field)
self.assertEqual(obj.translated_field, QONORMAL[2].translated_field['en'])
with translation.override('ja'):
with self.assertNumQueries(2):
qs = QONormal.objects.filter(shared_field__contains='1')
self.assertEqual(qs.count(), 1)
obj = qs[0]
with self.assertNumQueries(0):
self.assertEqual(obj.shared_field, QONORMAL[1].shared_field)
self.assertEqual(obj.translated_field, QONORMAL[1].translated_field['ja'])
def test_translated_filter(self):
with translation.override('en'):
with self.assertNumQueries(2):
qs = QONormal.objects.filter(translated_field__contains='English').order_by('shared_field')
self.assertEqual(qs.count(), 2)
obj1, obj2 = qs
with self.assertNumQueries(0):
self.assertEqual(obj1.shared_field, QONORMAL[1].shared_field)
self.assertEqual(obj1.translated_field, QONORMAL[1].translated_field['en'])
self.assertEqual(obj2.shared_field, QONORMAL[2].shared_field)
self.assertEqual(obj2.translated_field, QONORMAL[2].translated_field['en'])
def test_deferred_language_filter(self):
with translation.override('ja'):
qs = QONormal.objects.filter(translated_field__contains='English').order_by('shared_field')
with translation.override('en'):
self.assertEqual(qs.count(), 2)
obj1, obj2 = qs
with translation.override('ja'):
self.assertEqual(obj1.shared_field, QONORMAL[1].shared_field)
self.assertEqual(obj1.translated_field, QONORMAL[1].translated_field['en'])
self.assertEqual(obj2.shared_field, QONORMAL[2].shared_field)
self.assertEqual(obj2.translated_field, QONORMAL[2].translated_field['en'])
class RelatedManagerTests(HvadTestCase, QONormalFixture):
qonormal_count = 2
def setUp(self):
super(RelatedManagerTests, self).setUp()
with translation.override('en'):
self.normal1 = QONormal.objects.get(shared_field=QONORMAL[1].shared_field)
self.normal2 = QONormal.objects.get(shared_field=QONORMAL[2].shared_field)
self.related = QOSimpleRelated.objects.create(normal=self.normal1,
translated_field='translated1_en')
# spurious instance to catch cases where filtering is not correct
obj = QOSimpleRelated.objects.create(normal=self.normal2,
translated_field='dummy')
obj.translate('ja').save()
self.many1 = QOMany.objects.create(translated_field='translated1_en')
self.many1.translate('ja')
self.many1.translated_field = 'translated1_ja'
self.many1.save()
# spurious instance to catch cases where filtering is not correct
obj = QOMany.objects.create(translated_field='dummy')
obj.translate('ja').save()
def test_forward_foreign_key(self):
""" ForeignKey accessor should use the TranslationQueryset and fetch
the translation when it retrieves the shared model.
"""
with translation.override('en'):
related = QOSimpleRelated.objects.get(pk=self.related.pk)
self.assertEqual(related.translated_field, self.related.translated_field)
with self.assertNumQueries(1):
self.assertEqual(related.normal.shared_field, QONORMAL[1].shared_field)
with self.assertNumQueries(0):
self.assertEqual(related.normal.translated_field, QONORMAL[1].translated_field['en'])
def test_reverse_foreign_key_query(self):
""" Reverse foreign key should use the TranslationQueryset """
with translation.override('en'):
qs = list(self.normal1.simplerel.all())
self.assertEqual(len(qs), 1)
with self.assertNumQueries(0):
self.assertEqual(qs[0].normal_id, self.qonormal_id[1])
self.assertEqual(qs[0].translated_field, self.related.translated_field)
with translation.override('ja'):
qs = list(self.normal1.simplerel.all())
self.assertEqual(len(qs), 0)
def test_reverse_foreign_key_updates(self):
""" Reverse foreign key should use the TranslationQueryset for adding/removing """
with translation.override('en'):
self.assertEqual(self.normal1.simplerel.count(), 1)
self.normal1.simplerel.clear()
self.assertEqual(self.normal1.simplerel.count(), 0)
self.normal1.simplerel.add(self.related)
self.assertEqual(self.normal1.simplerel.count(), 1)
self.normal1.simplerel.remove(self.related)
self.assertEqual(self.normal1.simplerel.count(), 0)
self.normal1.simplerel.create(translated_field='test_en')
self.assertEqual(self.normal1.simplerel.language('en').count(), 1)
self.assertEqual(self.normal1.simplerel.language('ja').count(), 0)
obj = self.normal1.simplerel.get(translated_field='test_en')
self.assertEqual(obj.normal_id, self.normal1.pk)
def test_forward_many_to_many_query(self):
""" Forward side of many to many relation should use the TranslationQueryset
"""
with translation.override('en'):
many = QOMany.objects.get(translated_field='translated1_en')
qs = many.normals.all()
self.assertEqual(len(qs), 0)
many.normals.add(self.normal1)
qs = list(many.normals.all())
with self.assertNumQueries(0):
self.assertEqual(len(qs), 1)
self.assertEqual(qs[0].shared_field, QONORMAL[1].shared_field)
self.assertEqual(qs[0].translated_field, QONORMAL[1].translated_field['en'])
qs = list(many.normals.filter(translated_field=QONORMAL[1].translated_field['en']))
with self.assertNumQueries(0):
self.assertEqual(len(qs), 1)
self.assertEqual(qs[0].shared_field, QONORMAL[1].shared_field)
self.assertEqual(qs[0].translated_field, QONORMAL[1].translated_field['en'])
def test_forward_many_to_many_updates(self):
with translation.override('en'):
many = QOMany.objects.get(translated_field='translated1_en')
many.normals.add(self.normal1, self.normal2)
self.assertEqual(many.normals.count(), 2)
many.normals.remove(self.normal2)
self.assertEqual(many.normals.count(), 1)
self.assertEqual(many.normals.get().pk, self.qonormal_id[1])
obj = many.normals.create(translated_field='test_en')
self.assertEqual(many.normals.count(), 2)
self.assertEqual(many.normals.language('ja').count(), 1)
self.assertEqual(list(obj.manyrels.values_list('pk', flat=True)), [many.pk])
many.normals.clear()
self.assertEqual(many.normals.count(), 0)
def test_reverse_many_to_many_query(self):
""" Reverse side of many to many relation should use the TranslationQueryset
"""
with translation.override('en'):
many = QOMany.objects.get(translated_field='translated1_en')
many.normals.add(self.normal1)
normal = QONormal.objects.get(pk=self.normal1.pk)
qs = list(normal.manyrels.all())
with self.assertNumQueries(0):
self.assertEqual(len(qs), 1)
self.assertEqual(qs[0].translated_field, 'translated1_en')
qs = list(normal.manyrels.filter(translated_field='translated1_en'))
with self.assertNumQueries(0):
self.assertEqual(len(qs), 1)
self.assertEqual(qs[0].translated_field, 'translated1_en')
class PrefetchRelatedTests(HvadTestCase, QONormalFixture):
qonormal_count = 2
def setUp(self):
super(PrefetchRelatedTests, self).setUp()
with translation.override('en'):
self.normal1 = QONormal.objects.get(shared_field=QONORMAL[1].shared_field)
self.normal2 = QONormal.objects.get(shared_field=QONORMAL[2].shared_field)
self.related = QOSimpleRelated.objects.create(normal=self.normal1,
translated_field='translated1_en')
# spurious instance to catch cases where filtering is not correct
obj = QOSimpleRelated.objects.create(normal=self.normal2,
translated_field='dummy')
obj.translate('ja').save()
self.many1 = QOMany.objects.create(translated_field='translated1_en')
self.many1.translate('ja')
self.many1.translated_field = 'translated1_ja'
self.many1.save()
# spurious instance to catch cases where filtering is not correct
obj = QOMany.objects.create(translated_field='dummy')
obj.translate('ja').save()
def test_reverse_foreign_key(self):
with translation.override('en'):
with self.assertNumQueries(2):
obj = QONormal.objects.prefetch_related('simplerel').get(pk=self.qonormal_id[1])
with self.assertNumQueries(0):
self.assertEqual(len(obj.simplerel.all()), 1)
self.assertEqual(obj.simplerel.all()[0].pk, self.related.pk)
# this is the crucial part: translation should be cached, too
self.assertEqual(obj.simplerel.all()[0].translated_field, 'translated1_en')
with translation.override('ja'):
with self.assertNumQueries(2):
obj = QONormal.objects.prefetch_related('simplerel').get(pk=self.qonormal_id[1])
with self.assertNumQueries(0):
# just checking language filter is actually applied
self.assertEqual(len(obj.simplerel.all()), 0)
def test_forward_many_to_many(self):
self.many1.normals.add(self.normal1)
with translation.override('en'):
with self.assertNumQueries(2):
obj = QOMany.objects.prefetch_related('normals').get(pk=self.many1.pk)
with self.assertNumQueries(0):
self.assertEqual(len(obj.normals.all()), 1)
self.assertEqual(obj.normals.all()[0].translated_field,
QONORMAL[1].translated_field['en'])
with translation.override('ja'):
with self.assertNumQueries(2):
obj = QOMany.objects.prefetch_related('normals').get(pk=self.many1.pk)
with self.assertNumQueries(0):
self.assertEqual(len(obj.normals.all()), 1)
self.assertEqual(obj.normals.all()[0].translated_field,
QONORMAL[1].translated_field['ja'])
def test_reverse_many_to_many(self):
self.many1.normals.add(self.normal1)
with translation.override('en'):
with self.assertNumQueries(2):
obj = QONormal.objects.prefetch_related('manyrels').get(pk=self.qonormal_id[1])
with self.assertNumQueries(0):
self.assertEqual(len(obj.manyrels.all()), 1)
self.assertEqual(obj.manyrels.all()[0].translated_field,
'translated1_en')
with translation.override('ja'):
with self.assertNumQueries(2):
obj = QONormal.objects.prefetch_related('manyrels').get(pk=self.qonormal_id[1])
with self.assertNumQueries(0):
self.assertEqual(len(obj.manyrels.all()), 1)
self.assertEqual(obj.manyrels.all()[0].translated_field,
'translated1_ja')
| bsd-3-clause |
donckers/ansible | lib/ansible/module_utils/vca.py | 14 | 11090 | #
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
try:
from pyvcloud.vcloudair import VCA
HAS_PYVCLOUD = True
except ImportError:
HAS_PYVCLOUD = False
from ansible.module_utils.basic import AnsibleModule
SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'}
LOGIN_HOST = {'vca': 'vca.vmware.com', 'vchs': 'vchs.vmware.com'}
DEFAULT_SERVICE_TYPE = 'vca'
DEFAULT_VERSION = '5.7'
class VcaError(Exception):
def __init__(self, msg, **kwargs):
self.kwargs = kwargs
super(VcaError, self).__init__(msg)
def vca_argument_spec():
return dict(
username=dict(type='str', aliases=['user'], required=True),
password=dict(type='str', aliases=['pass','passwd'], required=True, no_log=True),
org=dict(),
service_id=dict(),
instance_id=dict(),
host=dict(),
api_version=dict(default=DEFAULT_VERSION),
service_type=dict(default=DEFAULT_SERVICE_TYPE, choices=SERVICE_MAP.keys()),
vdc_name=dict(),
gateway_name=dict(default='gateway'),
verify_certs=dict(type='bool', default=True)
)
class VcaAnsibleModule(AnsibleModule):
def __init__(self, *args, **kwargs):
argument_spec = vca_argument_spec()
argument_spec.update(kwargs.get('argument_spec', dict()))
kwargs['argument_spec'] = argument_spec
super(VcaAnsibleModule, self).__init__(*args, **kwargs)
if not HAS_PYVCLOUD:
self.fail("python module pyvcloud is required for this module")
self._vca = self.create_instance()
self.login()
self._gateway = None
self._vdc = None
@property
def vca(self):
return self._vca
@property
def gateway(self):
if self._gateway is not None:
return self._gateway
vdc_name = self.params['vdc_name']
gateway_name = self.params['gateway_name']
_gateway = self.vca.get_gateway(vdc_name, gateway_name)
if not _gateway:
raise VcaError('vca instance has no gateway named %s' % gateway_name)
self._gateway = _gateway
return _gateway
@property
def vdc(self):
if self._vdc is not None:
return self._vdc
vdc_name = self.params['vdc_name']
_vdc = self.vca.get_vdc(vdc_name)
if not _vdc:
raise VcaError('vca instance has no vdc named %s' % vdc_name)
self._vdc = _vdc
return _vdc
def get_vapp(self, vapp_name):
vapp = self.vca.get_vapp(self.vdc, vapp_name)
if not vapp:
raise VcaError('vca instance has no vapp named %s' % vapp_name)
return vapp
    def get_vm(self, vapp_name, vm_name):
        vapp = self.get_vapp(vapp_name)
        children = vapp.me.get_Children()
        vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
try:
return vms[0]
except IndexError:
raise VcaError('vapp has no vm named %s' % vm_name)
def create_instance(self):
service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
if service_type == 'vcd':
host = self.params['host']
else:
host = LOGIN_HOST[service_type]
username = self.params['username']
version = self.params.get('api_version')
if service_type == 'vchs':
version = '5.6'
verify = self.params.get('verify_certs')
return VCA(host=host, username=username,
service_type=SERVICE_MAP[service_type],
version=version, verify=verify)
def login(self):
service_type = self.params['service_type']
password = self.params['password']
login_org = None
if service_type == 'vcd':
login_org = self.params['org']
if not self.vca.login(password=password, org=login_org):
self.fail('Login to VCA failed', response=self.vca.response)
try:
method_name = 'login_%s' % service_type
meth = getattr(self, method_name)
meth()
except AttributeError:
self.fail('no login method exists for service_type %s' % service_type)
except VcaError as e:
self.fail(e.message, response=self.vca.response, **e.kwargs)
def login_vca(self):
instance_id = self.params['instance_id']
if not instance_id:
raise VcaError('missing required instance_id for service_type vca')
self.vca.login_to_instance_sso(instance=instance_id)
def login_vchs(self):
service_id = self.params['service_id']
if not service_id:
raise VcaError('missing required service_id for service_type vchs')
org = self.params['org']
if not org:
raise VcaError('missing required org for service_type vchs')
self.vca.login_to_org(service_id, org)
def login_vcd(self):
org = self.params['org']
if not org:
raise VcaError('missing required org for service_type vcd')
if not self.vca.token:
raise VcaError('unable to get token for service_type vcd')
if not self.vca.vcloud_session.org_url:
raise VcaError('unable to get org_url for service_type vcd')
self.vca.login(token=self.vca.token, org=org,
org_url=self.vca.vcloud_session.org_url)
def save_services_config(self, blocking=True):
task = self.gateway.save_services_configuration()
if not task:
self.fail(msg='unable to save gateway services configuration')
if blocking:
self.vca.block_until_completed(task)
def fail(self, msg, **kwargs):
self.fail_json(msg=msg, **kwargs)
def exit(self, **kwargs):
self.exit_json(**kwargs)
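# Editor's sketch (illustrative; 'vapp_name' is a made-up option, not part
# of this file): a module built on VcaAnsibleModule typically does
#
#     module = VcaAnsibleModule(argument_spec=dict(
#         vapp_name=dict(required=True)))
#     vapp = module.get_vapp(module.params['vapp_name'])
#     module.exit(changed=False)
#
# Login happens in __init__; gateway and vdc lookups are lazy properties.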
# -------------------------------------------------------------
# 9/18/2015 @privateip
# All of the functions below here were migrated from the original
# vca_* modules. All functions below should be considered deprecated
# and will be removed once all of the vca_* modules have been updated
# to use the new instance module above
# -------------------------------------------------------------
VCA_REQ_ARGS = ['instance_id', 'vdc_name']
VCHS_REQ_ARGS = ['service_id']
# VCD_REQ_ARGS is referenced by _validate_module() below but was never
# defined; 'host' and 'org' are an assumed requirement set for direct
# vCloud Director logins.
VCD_REQ_ARGS = ['host', 'org']
def _validate_module(module):
if not HAS_PYVCLOUD:
module.fail_json(msg="python module pyvcloud is needed for this module")
service_type = module.params.get('service_type', DEFAULT_SERVICE_TYPE)
if service_type == 'vca':
for arg in VCA_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vca" % arg)
if service_type == 'vchs':
for arg in VCHS_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vchs" % arg)
if service_type == 'vcd':
for arg in VCD_REQ_ARGS:
if module.params.get(arg) is None:
module.fail_json(msg="argument %s is mandatory when service type "
"is vcd" % arg)
def serialize_instances(instance_list):
instances = []
for i in instance_list:
instances.append(dict(apiUrl=i['apiUrl'], instance_id=i['id']))
return instances
def _vca_login(vca, password, instance):
if not vca.login(password=password):
raise VcaError("Login Failed: Please check username or password",
error=vca.response.content)
if not vca.login_to_instance_sso(instance=instance):
s_json = serialize_instances(vca.instances)
raise VcaError("Login to Instance failed: Seems like instance_id provided "
"is wrong .. Please check", valid_instances=s_json)
return vca
def _vchs_login(vca, password, service, org):
if not vca.login(password=password):
raise VcaError("Login Failed: Please check username or password",
error=vca.response.content)
if not vca.login_to_org(service, org):
raise VcaError("Failed to login to org, Please check the orgname",
error=vca.response.content)
def _vcd_login(vca, password, org):
# TODO: this function needs to be refactored
if not vca.login(password=password, org=org):
raise VcaError("Login Failed: Please check username or password "
"or host parameters")
if not vca.login(password=password, org=org):
raise VcaError("Failed to get the token",
error=vca.response.content)
if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url):
raise VcaError("Failed to login to org", error=vca.response.content)
def vca_login(module):
service_type = module.params.get('service_type')
username = module.params.get('username')
password = module.params.get('password')
instance = module.params.get('instance_id')
org = module.params.get('org')
vdc_name = module.params.get('vdc_name')
service = module.params.get('service_id')
version = module.params.get('api_version')
verify = module.params.get('verify_certs')
_validate_module(module)
if not vdc_name and service_type == 'vchs':
vdc_name = module.params.get('service_id')
if not org and service_type == 'vchs':
org = vdc_name or service
if service_type == 'vcd':
host = module.params.get('host')
else:
host = LOGIN_HOST[service_type]
username = os.environ.get('VCA_USER', username)
password = os.environ.get('VCA_PASS', password)
if not username or not password:
msg = "Either the username or password is not set, please check args"
module.fail_json(msg=msg)
if service_type == 'vchs':
version = '5.6'
elif service_type == 'vcd' and not version:
        version = '5.6'
vca = VCA(host=host, username=username,
service_type=SERVICE_MAP[service_type],
version=version, verify=verify)
try:
if service_type == 'vca':
_vca_login(vca, password, instance)
elif service_type == 'vchs':
_vchs_login(vca, password, service, org)
elif service_type == 'vcd':
_vcd_login(vca, password, org)
except VcaError as e:
module.fail_json(msg=e.message, **e.kwargs)
return vca
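# Usage sketch (hedged): vca_login() reads everything from module.params, so
# a vca_* module typically just builds its AnsibleModule and hands it over.
# vca_argument_spec() below is an assumed helper defined elsewhere in this
# file and may differ by version:
#
#   module = AnsibleModule(argument_spec=vca_argument_spec())
#   vca = vca_login(module)   # calls module.fail_json() on any login error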
| gpl-3.0 |
bsmedberg/socorro | socorro/webapi/servers.py | 1 | 3703 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import web
import os
from socorro.webapi.classPartial import classWithPartialInit
from configman import Namespace, RequiredConfig
#==============================================================================
class WebServerBase(RequiredConfig):
required_config = Namespace()
#--------------------------------------------------------------------------
def __init__(self, config, services_list):
self.config = config
urls = []
for each in services_list:
if hasattr(each, 'uri'):
# this is the old middleware
uri, cls = each.uri, each
else:
# this is the new middleware_app
uri, cls = each
urls.append(uri)
urls.append(classWithPartialInit(cls, config))
self.urls = tuple(urls)
web.webapi.internalerror = web.debugerror
web.config.debug = False
self._identify()
self._wsgi_func = web.application(self.urls, globals()).wsgifunc()
#--------------------------------------------------------------------------
def run(self):
        raise NotImplementedError
#--------------------------------------------------------------------------
def _identify(self):
pass
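# Illustration (hypothetical service classes): services_list accepts either
# the old middleware style (classes carrying a `uri` attribute) or the new
# middleware_app style of (uri, class) pairs, and the two may be mixed:
#
#   services = [
#       ('/crash/(.*)', CrashService),   # new style: explicit (uri, class)
#       LegacyCrashService,              # old style: class defines .uri
#   ]
#   server = ApacheModWSGI(config, services)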
#==============================================================================
class ApacheModWSGI(WebServerBase):
"""When running Apache, modwsgi requires a reference to a "wsgifunc" In
this varient of the WebServer class, the run function returns the result of
the webpy framework's wsgifunc. Applications that use this class must
provide a module level variable 'application' in the module given to Apache
modwsgi configuration. The value of the variable must be the _wsgi_func.
"""
#--------------------------------------------------------------------------
def run(self):
return self._wsgi_func
#--------------------------------------------------------------------------
def _identify(self):
self.config.logger.info('this is ApacheModWSGI')
#--------------------------------------------------------------------------
@staticmethod
def get_socorro_config_path(wsgi_file):
wsgi_path = os.path.dirname(os.path.realpath(wsgi_file))
config_path = os.path.join(wsgi_path, '..', 'config')
return os.path.abspath(config_path)
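    # Illustration (hypothetical paths): for a wsgi file living next to a
    # ../config directory, e.g. /srv/socorro/application/socorro.wsgi,
    #
    #   ApacheModWSGI.get_socorro_config_path(__file__)
    #
    # resolves to /srv/socorro/config.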
#==============================================================================
class StandAloneServer(WebServerBase):
required_config = Namespace()
required_config.add_option(
'port',
doc='the port to listen to for submissions',
default=8882
)
#==============================================================================
class CherryPy(StandAloneServer):
required_config = Namespace()
required_config.add_option(
'ip_address',
doc='the IP address from which to accept submissions',
default='127.0.0.1'
)
#--------------------------------------------------------------------------
def run(self):
web.runsimple(
self._wsgi_func,
(self.config.web_server.ip_address, self.config.web_server.port)
)
#--------------------------------------------------------------------------
def _identify(self):
self.config.logger.info(
'this is CherryPy from web.py running standalone at %s:%d',
self.config.web_server.ip_address,
self.config.web_server.port
)
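# Minimal standalone sketch (assumes the configman wiring that populates
# config.web_server): the constructor takes the same (config, services_list)
# pair as WebServerBase, and run() blocks while serving HTTP:
#
#   server = CherryPy(config, services_list)
#   server.run()   # serves on config.web_server.ip_address:port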
| mpl-2.0 |
lgiordani/punch | punch/vcs_repositories/git_flow_repo.py | 1 | 3464 | from __future__ import print_function, absolute_import, division
import subprocess
import os
import six
from punch.vcs_repositories import git_repo as gr
from punch.vcs_repositories.exceptions import (
RepositoryStatusError,
RepositorySystemError
)
class GitFlowRepo(gr.GitRepo):
def __init__(self, working_path, config_obj, files_to_commit=None):
if six.PY2:
super(GitFlowRepo, self).__init__(
working_path, config_obj, files_to_commit)
else:
super().__init__(working_path, config_obj, files_to_commit)
self.release_branch = "release/{}".format(
self.config_obj.options['new_version']
)
def _set_command(self):
self.commands = ['git', 'flow']
self.command = 'git'
def _check_system(self):
# git flow -h returns 1 so the call fails
p = subprocess.Popen(
self.commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
if "git flow <subcommand>" not in stdout.decode('utf8'):
raise RepositorySystemError("Cannot run {}".format(self.commands))
if not os.path.exists(os.path.join(self.working_path, '.git')):
raise RepositorySystemError(
"The current directory {} is not a Git repository".format(
self.working_path))
def pre_start_release(self):
output = self._run([self.command, "status"])
if "Changes to be committed:" in output:
raise RepositoryStatusError(
("Cannot start release while repository "
"contains uncommitted changes")
)
self._run([self.command, "checkout", "develop"])
branch = self.get_current_branch()
if branch != "develop":
raise RepositoryStatusError(
"Current branch shall be develop but is {}".format(branch))
def start_release(self):
self._run(
self.commands + [
"release",
"start",
self.config_obj.options['new_version']
])
def finish_release(self):
branch = self.get_current_branch()
command = [self.command, "add"]
if self.config_obj.include_all_files:
command.append(".")
else:
command.extend(self.config_obj.include_files)
command.extend(self.files_to_commit)
self._run(command)
output = self._run([self.command, "status"])
if "nothing to commit, working directory clean" in output or \
"nothing to commit, working tree clean" in output:
self._run([self.command, "checkout", "develop"])
self._run([self.command, "branch", "-d", branch])
return
message = ["-m", self.config_obj.commit_message]
command_line = [self.command, "commit"]
command_line.extend(message)
self._run(command_line)
self._run(
self.commands + [
"release",
"finish",
"-m",
branch,
self.config_obj.options['new_version']
])
def post_finish_release(self):
pass
def get_info(self):
return [
("Commit message", self.config_obj.commit_message),
("Release branch", self.release_branch),
]
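# Usage sketch (hypothetical config object): punch drives these hooks in
# order, so a release roughly amounts to:
#
#   repo = GitFlowRepo('.', config, files_to_commit=['version.py'])
#   repo.pre_start_release()    # refuses to run with uncommitted changes
#   repo.start_release()        # git flow release start <new_version>
#   repo.finish_release()       # commit bumped files, git flow release finish
#   repo.post_finish_release()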
| isc |
jjz/mezzanine | mezzanine/forms/models.py | 23 | 4797 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.models import Orderable, RichText
from mezzanine.forms import fields
from mezzanine.pages.models import Page
class Form(Page, RichText):
"""
A user-built form.
"""
button_text = models.CharField(_("Button text"), max_length=50, blank=True)
response = RichTextField(_("Response"))
send_email = models.BooleanField(_("Send email to user"), default=True,
help_text=_("To send an email to the email address supplied in "
"the form upon submission, check this box."))
email_from = models.EmailField(_("From address"), max_length=254,
help_text=_("The address the email will be sent from"), blank=True)
email_copies = models.CharField(_("Send email to others"), blank=True,
help_text=_("Provide a comma separated list of email addresses "
"to be notified upon form submission. Leave blank to "
"disable notifications."),
max_length=200)
email_subject = models.CharField(_("Subject"), max_length=200, blank=True)
email_message = models.TextField(_("Message"), blank=True,
help_text=_("Emails sent based on the above options will contain "
"each of the form fields entered. You can also enter "
"a message here that will be included in the email."))
class Meta:
verbose_name = _("Form")
verbose_name_plural = _("Forms")
class FieldManager(models.Manager):
"""
    Only show visible fields when displaying the actual form.
"""
def visible(self):
return self.filter(visible=True)
@python_2_unicode_compatible
class Field(Orderable):
"""
A field for a user-built form.
"""
form = models.ForeignKey("Form", related_name="fields")
label = models.CharField(_("Label"),
max_length=settings.FORMS_LABEL_MAX_LENGTH)
field_type = models.IntegerField(_("Type"), choices=fields.NAMES)
required = models.BooleanField(_("Required"), default=True)
visible = models.BooleanField(_("Visible"), default=True)
choices = models.CharField(_("Choices"), max_length=1000, blank=True,
help_text=_("Comma separated options where applicable. If an option "
"itself contains commas, surround the option with `backticks`."))
default = models.CharField(_("Default value"), blank=True,
max_length=settings.FORMS_FIELD_MAX_LENGTH)
placeholder_text = models.CharField(_("Placeholder Text"), blank=True,
max_length=100, editable=settings.FORMS_USE_HTML5)
help_text = models.CharField(_("Help text"), blank=True, max_length=100)
objects = FieldManager()
class Meta:
verbose_name = _("Field")
verbose_name_plural = _("Fields")
order_with_respect_to = "form"
def __str__(self):
return self.label
def get_choices(self):
"""
Parse a comma separated choice string into a list of choices taking
into account quoted choices.
"""
choice = ""
(quote, unquote) = ("`", "`")
quoted = False
for char in self.choices:
if not quoted and char == quote:
quoted = True
elif quoted and char == unquote:
quoted = False
elif char == "," and not quoted:
choice = choice.strip()
if choice:
yield choice, choice
choice = ""
else:
choice += char
choice = choice.strip()
if choice:
yield choice, choice
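    # Example (hypothetical field): backtick quoting keeps commas inside an
    # option intact, e.g. with choices = "red,`green, dark`,blue"
    # list(field.get_choices()) yields
    # [("red", "red"), ("green, dark", "green, dark"), ("blue", "blue")].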
def is_a(self, *args):
"""
Helper that returns ``True`` if the field's type is given in any arg.
"""
return self.field_type in args
class FormEntry(models.Model):
"""
An entry submitted via a user-built form.
"""
form = models.ForeignKey("Form", related_name="entries")
entry_time = models.DateTimeField(_("Date/time"))
class Meta:
verbose_name = _("Form entry")
verbose_name_plural = _("Form entries")
class FieldEntry(models.Model):
"""
A single field value for a form entry submitted via a user-built form.
"""
entry = models.ForeignKey("FormEntry", related_name="fields")
field_id = models.IntegerField()
value = models.CharField(max_length=settings.FORMS_FIELD_MAX_LENGTH,
null=True)
class Meta:
verbose_name = _("Form field entry")
verbose_name_plural = _("Form field entries")
| bsd-2-clause |
freezmeinster/teh-manis | django/contrib/admin/models.py | 228 | 2207 | from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.contrib.admin.util import quote
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
from django.utils.safestring import mark_safe
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
e = self.model(None, None, user_id, content_type_id, smart_unicode(object_id), object_repr[:200], action_flag, change_message)
e.save()
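    # Illustration (argument values assumed): Django's admin calls this
    # roughly as
    #
    #   LogEntry.objects.log_action(
    #       user_id=request.user.pk,
    #       content_type_id=ContentType.objects.get_for_model(obj).pk,
    #       object_id=obj.pk,
    #       object_repr=smart_unicode(obj),
    #       action_flag=CHANGE,
    #       change_message="Changed title.")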
class LogEntry(models.Model):
action_time = models.DateTimeField(_('action time'), auto_now=True)
user = models.ForeignKey(User)
content_type = models.ForeignKey(ContentType, blank=True, null=True)
object_id = models.TextField(_('object id'), blank=True, null=True)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
change_message = models.TextField(_('change message'), blank=True)
objects = LogEntryManager()
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'django_admin_log'
ordering = ('-action_time',)
def __repr__(self):
return smart_unicode(self.action_time)
def is_addition(self):
return self.action_flag == ADDITION
def is_change(self):
return self.action_flag == CHANGE
def is_deletion(self):
return self.action_flag == DELETION
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
def get_admin_url(self):
"""
Returns the admin URL to edit the object represented by this log entry.
This is relative to the Django admin index page.
"""
if self.content_type and self.object_id:
return mark_safe(u"%s/%s/%s/" % (self.content_type.app_label, self.content_type.model, quote(self.object_id)))
return None | bsd-3-clause |
plissonf/scikit-learn | sklearn/linear_model/coordinate_descent.py | 59 | 76336 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
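# Quick illustration (synthetic data): with the default eps=1e-3 the grid is
# geometric, largest alpha first, spanning alpha_max down to alpha_max * eps:
#
#   >>> import numpy as np
#   >>> X = np.array([[0., 1.], [1., 2.], [2., 3.]])
#   >>> y = np.array([0., 1., 2.])
#   >>> grid = _alpha_grid(X, y, n_alphas=5)
#   >>> grid[0] == grid.max() and np.allclose(grid[-1], grid[0] * 1e-3)
#   True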
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases the Lars solver may be significantly
    faster at computing this path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
check_input = 'check_input' not in params or params['check_input']
    pre_fit = 'pre_fit' not in params or params['pre_fit']
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if pre_fit:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False,
copy=False, Xy_precompute_order='F')
if alphas is None:
            # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, 'csc', dtype=np.float64,
order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
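# Minimal sketch (random data): the path functions return alphas in
# decreasing order with one coefficient column per alpha:
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> X, y = rng.randn(20, 5), rng.randn(20)
#   >>> alphas, coefs, gaps = enet_path(X, y, l1_ratio=0.8, n_alphas=10)
#   >>> coefs.shape   # (n_features, n_alphas)
#   (5, 10)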
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False, Xy_precompute_order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False,
pre_fit=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
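# Usage sketch: ElasticNet follows the standard scikit-learn estimator API;
# exact coefficients depend on the data, so the prediction line is left
# unchecked:
#
#   >>> clf = ElasticNet(alpha=0.1, l1_ratio=0.7)
#   >>> clf = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
#   >>> clf.predict([[1.5, 1.5]])   # doctest: +SKIP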
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
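# Usage sketch (synthetic data): LassoCV fits the path on each fold and
# exposes the cross-validated choice; alpha_ is always one of alphas_:
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> X, y = rng.randn(50, 4), rng.randn(50)
#   >>> reg = LassoCV(cv=3).fit(X, y)
#   >>> reg.alpha_ in reg.alphas_
#   True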
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
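    As a worked example, a penalty of ``0.1 * L1 + 0.05 * L2`` corresponds
    to ``alpha = 0.15`` and ``l1_ratio = 0.1 / 0.15`` (roughly 0.667).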
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
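        A minimal sketch of the advised allocation (values are illustrative)::
            >>> import numpy as np
            >>> X = np.asfortranarray(np.zeros((3, 2)))
            >>> X.flags['F_CONTIGUOUS']
            True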
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
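    Examples
    --------
    A minimal illustrative fit on toy data (fitted values omitted, as they
    depend on the solver):
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLassoCV()
    >>> model = clf.fit([[0, 0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])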
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
signed/intellij-community | python/lib/Lib/wsgiref/util.py | 104 | 5598 | """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
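# A minimal illustration of the iteration behaviour (the StringIO stands in
# for any file-like object):
#
#     >>> from StringIO import StringIO
#     >>> list(FileWrapper(StringIO("abcdef"), blksize=4))
#     ['abcd', 'ef']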
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
def application_uri(environ):
"""Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
url = environ['wsgi.url_scheme']+'://'
from urllib import quote
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME') or '/')
return url
def request_uri(environ, include_query=1):
"""Return the full request URI, optionally including the query string"""
url = application_uri(environ)
from urllib import quote
path_info = quote(environ.get('PATH_INFO',''))
if not environ.get('SCRIPT_NAME'):
url += path_info[1:]
else:
url += path_info
if include_query and environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url
def shift_path_info(environ):
"""Shift a name from PATH_INFO to SCRIPT_NAME, returning it
If there are no remaining path segments in PATH_INFO, return None.
Note: 'environ' is modified in-place; use a copy if you need to keep
the original PATH_INFO or SCRIPT_NAME.
Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
'/' to SCRIPT_NAME, even though empty path segments are normally ignored,
and SCRIPT_NAME doesn't normally end in a '/'. This is intentional
behavior, to ensure that an application can tell the difference between
'/x' and '/x/' when traversing to objects.
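    A minimal illustration (environ values here are made up)::
        >>> env = {'SCRIPT_NAME': '', 'PATH_INFO': '/x/y'}
        >>> shift_path_info(env)
        'x'
        >>> env['SCRIPT_NAME'], env['PATH_INFO']
        ('/x', '/y')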
"""
path_info = environ.get('PATH_INFO','')
if not path_info:
return None
path_parts = path_info.split('/')
    path_parts[1:-1] = [p for p in path_parts[1:-1] if p and p != '.']
name = path_parts[1]
del path_parts[1]
script_name = environ.get('SCRIPT_NAME','')
script_name = posixpath.normpath(script_name+'/'+name)
if script_name.endswith('/'):
script_name = script_name[:-1]
if not name and not script_name.endswith('/'):
script_name += '/'
environ['SCRIPT_NAME'] = script_name
environ['PATH_INFO'] = '/'.join(path_parts)
# Special case: '/.' on PATH_INFO doesn't get stripped,
# because we don't strip the last element of PATH_INFO
# if there's only one path part left. Instead of fixing this
# above, we fix it here so that PATH_INFO gets normalized to
# an empty string in the environ.
if name=='.':
name = None
return name
def setup_testing_defaults(environ):
"""Update 'environ' with trivial defaults for testing purposes
This adds various parameters required for WSGI, including HTTP_HOST,
SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
and all of the wsgi.* variables. It only supplies default values,
and does not replace any existing settings for these variables.
This routine is intended to make it easier for unit tests of WSGI
servers and applications to set up dummy environments. It should *not*
be used by actual WSGI servers or applications, since the data is fake!
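    A minimal sketch of its use together with request_uri (the host name is
    hypothetical)::
        >>> environ = {'HTTP_HOST': 'example.com', 'PATH_INFO': '/x',
        ...            'QUERY_STRING': 'a=1'}
        >>> setup_testing_defaults(environ)
        >>> request_uri(environ)
        'http://example.com/x?a=1'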
"""
environ.setdefault('SERVER_NAME','127.0.0.1')
environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')
environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
environ.setdefault('REQUEST_METHOD','GET')
if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
environ.setdefault('SCRIPT_NAME','')
environ.setdefault('PATH_INFO','/')
environ.setdefault('wsgi.version', (1,0))
environ.setdefault('wsgi.run_once', 0)
environ.setdefault('wsgi.multithread', 0)
environ.setdefault('wsgi.multiprocess', 0)
from StringIO import StringIO
environ.setdefault('wsgi.input', StringIO(""))
environ.setdefault('wsgi.errors', StringIO())
environ.setdefault('wsgi.url_scheme',guess_scheme(environ))
if environ['wsgi.url_scheme']=='http':
environ.setdefault('SERVER_PORT', '80')
elif environ['wsgi.url_scheme']=='https':
environ.setdefault('SERVER_PORT', '443')
_hoppish = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}.has_key
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return _hoppish(header_name.lower())
#
| apache-2.0 |
rhurkes/chasegame | venv/lib/python2.7/site-packages/cryptography/hazmat/bindings/openssl/pkcs12.py | 6 | 1141 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/pkcs12.h>
"""
TYPES = """
typedef ... PKCS12;
"""
FUNCTIONS = """
void PKCS12_free(PKCS12 *);
PKCS12 *d2i_PKCS12_bio(BIO *, PKCS12 **);
int i2d_PKCS12_bio(BIO *, PKCS12 *);
"""
MACROS = """
int PKCS12_parse(PKCS12 *, const char *, EVP_PKEY **, X509 **,
Cryptography_STACK_OF_X509 **);
PKCS12 *PKCS12_create(char *, char *, EVP_PKEY *, X509 *,
Cryptography_STACK_OF_X509 *, int, int, int, int, int);
"""
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
| mit |
tudorian/eden | modules/templates/default/parser.py | 18 | 22381 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
""" Message Parsing
Template-specific Message Parsers are defined here.
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Parser",)
#import re
#import pyparsing
try:
import nltk
from nltk.corpus import wordnet as wn
NLTK = True
except:
NLTK = False
from gluon import current
from gluon.tools import fetch
from s3.s3fields import S3Represent
from s3.s3parser import S3Parsing
from s3.s3utils import soundex
# =============================================================================
class S3Parser(object):
"""
Message Parsing Template
"""
# -------------------------------------------------------------------------
@staticmethod
def parse_twitter(message):
"""
Filter unstructured tweets
"""
db = current.db
s3db = current.s3db
cache = s3db.cache
# Start with a base priority
priority = 0
# Default Category
category = "Unknown"
# Lookup the channel type
#ctable = s3db.msg_channel
#channel = db(ctable.channel_id == message.channel_id).select(ctable.instance_type,
# limitby=(0, 1)
# ).first()
#service = channel.instance_type.split("_", 2)[1]
#if service in ("mcommons", "tropo", "twilio"):
# service = "sms"
#if service == "twitter":
# priority -= 1
#elif service == "sms":
# priority += 1
service = "twitter"
# Lookup trusted senders
# - these could be trained or just trusted
table = s3db.msg_sender
ctable = s3db.pr_contact
query = (table.deleted == False) & \
(ctable.pe_id == table.pe_id) & \
(ctable.contact_method == "TWITTER")
senders = db(query).select(table.priority,
ctable.value,
cache=cache)
for s in senders:
if sender == s[ctable].value:
priority += s[table].priority
break
# If Anonymous, check their history
# - within our database
# if service == "twitter":
# # Check Followers
# # Check Retweets
# # Check when account was created
# (Note that it is still possible to game this - plausible accounts can be purchased)
ktable = s3db.msg_keyword
keywords = db(ktable.deleted == False).select(ktable.id,
ktable.keyword,
ktable.incident_type_id,
cache=cache)
incident_type_represent = S3Represent(lookup="event_incident_type")
if NLTK:
# Lookup synonyms
# @ToDo: Cache
synonyms = {}
for kw in keywords:
syns = []
try:
synsets = wn.synsets(kw.keyword)
for synset in synsets:
syns += [lemma.name for lemma in synset.lemmas]
except LookupError:
nltk.download("wordnet")
synsets = wn.synsets(kw.keyword)
for synset in synsets:
syns += [lemma.name for lemma in synset.lemmas]
synonyms[kw.keyword.lower()] = syns
ltable = s3db.gis_location
query = (ltable.deleted != True) & \
(ltable.name != None)
locs = db(query).select(ltable.id,
ltable.name,
cache=cache)
lat = lon = None
location_id = None
loc_matches = 0
# Split message into words
words = message.split(" ")
index = 0
max_index = len(words) - 1
for word in words:
word = word.lower()
if word.endswith(".") or \
word.endswith(":") or \
word.endswith(","):
word = word[:-1]
skip = False
if word in ("safe", "ok"):
priority -= 1
            elif word == "help":
priority += 1
elif service == "twitter" and \
                word == "rt":
# @ToDo: Increase priority of the original message
priority -= 1
skip = True
# Look for URL
if word.startswith("http://"):
priority += 1
skip = True
# @ToDo: Follow URL to see if we can find an image
#try:
# page = fetch(word)
#except urllib2.HTTPError:
# pass
# Check returned str for image like IS_IMAGE()
if (index < max_index):
if word == "lat":
skip = True
try:
lat = words[index + 1]
lat = float(lat)
except:
pass
elif word == "lon":
skip = True
try:
lon = words[index + 1]
lon = float(lon)
except:
pass
if not skip:
for kw in keywords:
_word = kw.keyword.lower()
if _word == word:
# Check for negation
if index and words[index - 1].lower() == "no":
pass
else:
category = incident_type_represent(kw.incident_type_id)
break
elif NLTK:
# Synonyms
if word in synonyms[_word]:
# Check for negation
if index and words[index - 1].lower() == "no":
pass
else:
category = incident_type_represent(kw.incident_type_id)
break
# Check for Location
for loc in locs:
name = loc.name.lower()
# @ToDo: Do a Unicode comparison
if word == name:
if not loc_matches:
location_id = loc.id
priority += 1
loc_matches += 1
elif (index < max_index) and \
("%s %s" % (word, words[index + 1]) == name):
# Try names with 2 words
if not loc_matches:
location_id = loc.id
priority += 1
loc_matches += 1
index += 1
# @ToDo: Prioritise reports from people located where they are reporting from
# if coordinates:
if not loc_matches or loc_matches > 1:
if lat and lon:
location_id = ltable.insert(lat = lat,
lon = lon)
elif service == "twitter":
# @ToDo: Use Geolocation of Tweet
#location_id =
pass
# @ToDo: Update records inside this function with parsed data
# @ToDo: Image
#return category, priority, location_id
# No reply here
return None
# -------------------------------------------------------------------------
@staticmethod
def _parse_keywords(message_body):
"""
Parse Keywords
- helper function for search_resource, etc
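            For example (illustrative), the body "get John person phone"
            yields (["get", "person", "phone"], "John"); the soundex
            comparison also absorbs spelling variations such as "emial"
            for "email".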
"""
# Equivalent keywords in one list
primary_keywords = ["get", "give", "show"]
contact_keywords = ["email", "mobile", "facility", "clinical",
"security", "phone", "status", "hospital",
"person", "organisation"]
pkeywords = primary_keywords + contact_keywords
keywords = message_body.split(" ")
pquery = []
name = ""
for word in keywords:
match = None
for key in pkeywords:
if soundex(key) == soundex(word):
match = key
break
if match:
pquery.append(match)
else:
name = word
return pquery, name
# -------------------------------------------------------------------------
def search_resource(self, message):
"""
1st Pass Parser for searching resources
- currently supports people, hospitals and organisations.
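            Illustrative message bodies (keywords are soundex-matched)::
                "get John person phone"       -> John's mobile number
                "get Mercy hospital facility" -> facility status of Mercy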
"""
message_body = message.body
if not message_body:
return None
pquery, name = self._parse_keywords(message_body)
if "person" in pquery:
reply = self.search_person(message, pquery, name)
elif "hospital" in pquery:
reply = self.search_hospital(message, pquery, name)
elif "organisation" in pquery:
reply = self.search_organisation(message, pquery, name)
else:
reply = None
return reply
# -------------------------------------------------------------------------
def search_person(self, message, pquery=None, name=None):
"""
Search for People
- can be called direct
- can be called from search_by_keyword
"""
message_body = message.body
if not message_body:
return None
if not pquery or not name:
pquery, name = self._parse_keywords(message_body)
T = current.T
db = current.db
s3db = current.s3db
reply = None
result = []
# Person Search [get name person phone email]
s3_accessible_query = current.auth.s3_accessible_query
table = s3db.pr_person
query = (table.deleted == False) & \
(s3_accessible_query("read", table))
rows = db(query).select(table.pe_id,
table.first_name,
table.middle_name,
table.last_name)
_name = soundex(str(name))
for row in rows:
if (_name == soundex(row.first_name)) or \
(_name == soundex(row.middle_name)) or \
(_name == soundex(row.last_name)):
presult = dict(name = row.first_name, id = row.pe_id)
result.append(presult)
if len(result) == 0:
return T("No Match")
elif len(result) > 1:
return T("Multiple Matches")
else:
# Single Match
reply = result[0]["name"]
table = s3db.pr_contact
if "email" in pquery:
query = (table.pe_id == result[0]["id"]) & \
(table.contact_method == "EMAIL") & \
(s3_accessible_query("read", table))
recipient = db(query).select(table.value,
orderby = table.priority,
limitby=(0, 1)).first()
if recipient:
reply = "%s Email->%s" % (reply, recipient.value)
else:
reply = "%s 's Email Not available!" % reply
if "phone" in pquery:
query = (table.pe_id == result[0]["id"]) & \
(table.contact_method == "SMS") & \
(s3_accessible_query("read", table))
recipient = db(query).select(table.value,
orderby = table.priority,
limitby=(0, 1)).first()
if recipient:
reply = "%s Mobile->%s" % (reply,
recipient.value)
else:
reply = "%s 's Mobile Contact Not available!" % reply
return reply
# ---------------------------------------------------------------------
def search_hospital(self, message, pquery=None, name=None):
"""
Search for Hospitals
- can be called direct
- can be called from search_by_keyword
"""
message_body = message.body
if not message_body:
return None
if not pquery or not name:
pquery, name = self._parse_keywords(message_body)
T = current.T
db = current.db
s3db = current.s3db
reply = None
result = []
# Hospital Search [example: get name hospital facility status ]
table = s3db.hms_hospital
stable = s3db.hms_status
query = (table.deleted == False) & \
(current.auth.s3_accessible_query("read", table))
rows = db(query).select(table.id,
table.name,
table.aka1,
table.aka2,
table.phone_emergency
)
_name = soundex(str(name))
for row in rows:
if (_name == soundex(row.name)) or \
(_name == soundex(row.aka1)) or \
(_name == soundex(row.aka2)):
result.append(row)
if len(result) == 0:
return T("No Match")
elif len(result) > 1:
return T("Multiple Matches")
else:
# Single Match
hospital = result[0]
status = db(stable.hospital_id == hospital.id).select(stable.facility_status,
stable.clinical_status,
stable.security_status,
limitby=(0, 1)
).first()
reply = "%s %s (%s) " % (reply, hospital.name,
T("Hospital"))
if "phone" in pquery:
reply = reply + "Phone->" + str(hospital.phone_emergency)
if "facility" in pquery:
reply = reply + "Facility status " + \
str(stable.facility_status.represent\
(status.facility_status))
if "clinical" in pquery:
reply = reply + "Clinical status " + \
str(stable.clinical_status.represent\
(status.clinical_status))
if "security" in pquery:
reply = reply + "Security status " + \
str(stable.security_status.represent\
(status.security_status))
return reply
# ---------------------------------------------------------------------
def search_organisation(self, message, pquery=None, name=None):
"""
Search for Organisations
- can be called direct
- can be called from search_by_keyword
"""
message_body = message.body
if not message_body:
return None
if not pquery or not name:
pquery, name = self._parse_keywords(message_body)
T = current.T
db = current.db
s3db = current.s3db
reply = None
result = []
# Organization search [example: get name organisation phone]
s3_accessible_query = current.auth.s3_accessible_query
table = s3db.org_organisation
query = (table.deleted == False) & \
(s3_accessible_query("read", table))
rows = db(query).select(table.id,
table.name,
table.phone,
table.acronym)
_name = soundex(str(name))
for row in rows:
if (_name == soundex(row.name)) or \
(_name == soundex(row.acronym)):
result.append(row)
        if len(result) == 0:
return T("No Match")
elif len(result) > 1:
return T("Multiple Matches")
else:
# Single Match
org = result[0]
reply = "%s %s (%s) " % (reply, org.name,
T("Organization"))
if "phone" in pquery:
reply = reply + "Phone->" + str(org.phone)
if "office" in pquery:
otable = s3db.org_office
query = (otable.organisation_id == org.id) & \
(s3_accessible_query("read", otable))
office = db(query).select(otable.address,
limitby=(0, 1)).first()
reply = reply + "Address->" + office.address
return reply
# -------------------------------------------------------------------------
def parse_ireport(self, message):
"""
Parse Messages directed to the IRS Module
- logging new incidents
- responses to deployment requests
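            Illustrative response body (the OpenGeoSMS syntax for new
            incident reports is handled by msg.parse_opengeosms)::
                "SI#17 Yes on my way"  -> logs a positive response to report 17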
"""
message_body = message.body
if not message_body:
return None
(lat, lon, code, text) = current.msg.parse_opengeosms(message_body)
if code == "SI":
# Create New Incident Report
reply = self._create_ireport(lat, lon, text)
else:
# Is this a Response to a Deployment Request?
words = message_body.split(" ")
text = ""
            response = None
report_id = None
comments = False
for word in words:
if "SI#" in word and not ireport:
report = word.split("#")[1]
report_id = int(report)
elif (soundex(word) == soundex("Yes")) and report_id \
and not comments:
response = True
comments = True
elif soundex(word) == soundex("No") and report_id \
and not comments:
response = False
comments = True
elif comments:
text += word + " "
if report_id:
reply = self._respond_drequest(message, report_id, response, text)
else:
reply = None
return reply
# -------------------------------------------------------------------------
@staticmethod
def _create_ireport(lat, lon, text):
"""
Create New Incident Report
"""
s3db = current.s3db
rtable = s3db.irs_ireport
gtable = s3db.gis_location
info = text.split(" ")
name = info[len(info) - 1]
category = ""
for a in range(0, len(info) - 1):
category = category + info[a] + " "
#@ToDo: Check for an existing location in DB
#records = db(gtable.id>0).select(gtable.id, \
# gtable.lat,
# gtable.lon)
#for record in records:
# try:
# if "%.6f"%record.lat == str(lat) and \
# "%.6f"%record.lon == str(lon):
# location_id = record.id
# break
# except:
# pass
location_id = gtable.insert(name="Incident:%s" % name,
lat=lat,
lon=lon)
rtable.insert(name=name,
message=text,
category=category,
location_id=location_id)
# @ToDo: Include URL?
reply = "Incident Report Logged!"
return reply
# -------------------------------------------------------------------------
@staticmethod
def _respond_drequest(message, report_id, response, text):
"""
Parse Replies To Deployment Request
"""
# Can we identify the Human Resource?
hr_id = S3Parsing().lookup_human_resource(message.from_address)
if hr_id:
rtable = current.s3db.irs_ireport_human_resource
query = (rtable.ireport_id == report_id) & \
(rtable.human_resource_id == hr_id)
current.db(query).update(reply=text,
response=response)
reply = "Response Logged in the Report (Id: %d )" % report_id
else:
reply = None
return reply
# END =========================================================================
| mit |
adaur/SickRage | lib/chardet/euckrfreq.py | 342 | 13546 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table
EUCKR_CHAR_TO_FREQ_ORDER = (
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
)
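# Illustrative sketch (not from the original module) of how a distribution
# analyser typically consumes this table, assuming ``order`` is the table
# index computed for a 2-byte EUC-KR character:
#
#     def is_frequent(order):
#         # ranks below 512 cover ~98.6% of typical text (see ratios above)
#         return order < EUCKR_TABLE_SIZE and EUCKR_CHAR_TO_FREQ_ORDER[order] < 512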
| gpl-3.0 |
rgeleta/odoo | addons/contacts/__openerp__.py | 260 | 1594 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Address Book',
'version': '1.0',
'category': 'Tools',
'description': """
This module gives you a quick view of your address book, accessible from your home page.
You can track your suppliers, customers and other contacts.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'summary': 'Contacts, People and Companies',
'depends': [
'mail',
],
'data': [
'contacts_view.xml',
],
'installable': True,
'application': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MaisamArif/NEST | backend/tmp_markov_framework/markov_script.py | 1 | 1912 | import numpy as np
import random
def normalize(arr):
    # Normalize the list in place so that its entries sum to 1.
    # A degenerate all-zero row is turned into [1, 0, 0, ...].
    s = sum(arr)
    if s == 0:
        s = 1
        arr[0] = 1
    for i, val in enumerate(arr):
        arr[i] = val/s
def generate(width, height):
    # Build a row-stochastic (each row sums to 1) random matrix.
matrix = []
for i in range(height):
matrix.append([])
for j in range(width):
matrix[i].append(float(random.randint(0, 1000))/1000)
normalize(matrix[i])
matrix[i] = [round(x, 3) for x in matrix[i]]
return np.matrix(matrix)
def initialize(soc0, soc1):
    # Create four random 4x4 matrices [P0, IM0, P1, IM1] and derive the two
    # cross transition matrices c0_to_c1 / c1_to_c0 from them.
matricies = []
for i in range(4):
matricies.append(generate(4,4))
#format is as follows [P0, IM0, P1, IM1]
P0, IM0, P1, IM1 = matricies
vm1 = IM1[0:,0] * IM0[0,0:] + IM1[0:,1] * IM0[1,0:] + IM1[0:,2] * IM0[2,0:] +IM1[0:,3] * IM0[3,0:]
vm2 = IM0[0:,0] * IM1[0,0:] + IM0[0:,1] * IM1[1,0:] + IM0[0:,2] * IM1[2,0:] +IM0[0:,3] * IM1[3,0:]
c0_to_c1 = ((1-soc0) * P0)+ (soc0 * vm1)
c1_to_c0 = ((1-soc1) * P1)+ (soc1 * vm2)
matricies.append(c0_to_c1)
matricies.append(c1_to_c0)
if random.randint(0,1) == 1:
position_and_direction = ['right', 'left', 'left', 'right']
else:
position_and_direction = ['left', 'right', 'right', 'left']
return matricies#, position_and_direction
def traverse(matrix):
    # Sample the index of the next state from a row of transition
    # probabilities, using a uniform random draw over [0, 1].
    rand = float(random.randint(0,1000))/1000
    count = 0
    for i, elem in enumerate(matrix):
        if rand > count and rand < count + elem:
            return i
        count += elem
    # Fall back to the last state if rounding left the draw unmatched.
    return len(matrix) - 1
def continue_chain(emo1, emo2, matricies):
T1, T2 = matricies
if random.randint(0,1) == 1:
position_and_direction = ['right', 'left', 'left', 'right']
else:
position_and_direction = ['left', 'right', 'right', 'left']
return (traverse(T1.A[emo1]), traverse(T2.A[emo2]))#, position_and_direction
if __name__ == "__main__":
pass
def main():
pass
| gpl-3.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_7_0/device_filter_broker.py | 14 | 69946 | from ..broker import Broker
class DeviceFilterBroker(Broker):
controller = "device_filters"
def show(self, **kwargs):
"""Shows the details for the specified device filter.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter methods. The listed methods will be called on each device filter returned and included in the output. Available methods are: device_service, src_device_object, dest_device_object, device_filter_set, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device_service, src_device_object, dest_device_object, device_filter_set, data_source, device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filter: The device filter identified by the specified DeviceFilterID.
:rtype device_filter: DeviceFilter
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available device filters. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which this rule belongs.
:type DeviceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device filters as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter methods. The listed methods will be called on each device filter returned and included in the output. Available methods are: device_service, src_device_object, dest_device_object, device_filter_set, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device_service, src_device_object, dest_device_object, device_filter_set, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit input for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceFilterID
:param sort: The data field(s) to use for sorting the output. Default is DeviceFilterID. Valid values are DeviceFilterID, DeviceID, DataSourceID, DeviceServiceID, SrcDeviceObjectID, DestDeviceObjectID, DeviceFilterSetID, FltFirstSeenTime, FltStartTime, FltEndTime, FltTimestamp, FltChangedCols, FltOrder, FltKey, FltAllowInd, FltRelevantInd, FltEnabledInd, FltEstablishedInd, FltArtificialInd, FltConfigText, FltProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceFilter. Valid values are DeviceFilterID, DeviceID, DataSourceID, DeviceServiceID, SrcDeviceObjectID, DestDeviceObjectID, DeviceFilterSetID, FltFirstSeenTime, FltStartTime, FltEndTime, FltTimestamp, FltChangedCols, FltOrder, FltKey, FltAllowInd, FltRelevantInd, FltEnabledInd, FltEstablishedInd, FltArtificialInd, FltConfigText, FltProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filters: An array of the DeviceFilter objects that match the specified input criteria.
:rtype device_filters: Array of DeviceFilter
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available device filters matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DestDeviceObjectID: The internal NetMRI identifier for the destination network object.
:type DestDeviceObjectID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceFilterSetID: The internal NetMRI identifier for the rule to which this rule belongs.
:type DeviceFilterSetID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which this rule belongs.
:type DeviceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceServiceID: The internal NetMRI identifier for the service (ports, protocol).
:type DeviceServiceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param FltAllowInd: A flag indicating whether the rule allows the matched traffic.
:type FltAllowInd: Array of Boolean
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltArtificialInd: A flag indicating that this rule has no counterpart in the device configuration.
:type FltArtificialInd: Array of Boolean
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltChangedCols: The fields that changed between this revision of the record and the previous revision.
:type FltChangedCols: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltConfigText: The original text of the configuration from which this rule is built.
:type FltConfigText: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltEnabledInd: A flag indicating whether this rule is enabled in the configuration or not.
:type FltEnabledInd: Array of Boolean
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltEndTime: The ending effective time of this record, or empty if still in effect.
:type FltEndTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param FltEstablishedInd: A flag indicating whether this rule has the 'established' option (Cisco-specific).
:type FltEstablishedInd: Array of Boolean
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltFirstSeenTime: The timestamp of when NetMRI first discovered this rule.
:type FltFirstSeenTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltKey: The computed key value that identifies this rule against text configuration.
:type FltKey: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltOrder: The numerical order of this rule in its rule list.
:type FltOrder: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltProvisionData: Internal data - do not modify, may change without warning.
:type FltProvisionData: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltRelevantInd: A flag indicating whether this rule is relevant for search operations.
:type FltRelevantInd: Array of Boolean
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltStartTime: The starting effective time of this record.
:type FltStartTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FltTimestamp: The date and time this record was collected or calculated.
:type FltTimestamp: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SrcDeviceObjectID: The internal NetMRI identifier for the source network object.
:type SrcDeviceObjectID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device filters as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter methods. The listed methods will be called on each device filter returned and included in the output. Available methods are: device_service, src_device_object, dest_device_object, device_filter_set, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device_service, src_device_object, dest_device_object, device_filter_set, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit input for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceFilterID
:param sort: The data field(s) to use for sorting the output. Default is DeviceFilterID. Valid values are DeviceFilterID, DeviceID, DataSourceID, DeviceServiceID, SrcDeviceObjectID, DestDeviceObjectID, DeviceFilterSetID, FltFirstSeenTime, FltStartTime, FltEndTime, FltTimestamp, FltChangedCols, FltOrder, FltKey, FltAllowInd, FltRelevantInd, FltEnabledInd, FltEstablishedInd, FltArtificialInd, FltConfigText, FltProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceFilter. Valid values are DeviceFilterID, DeviceID, DataSourceID, DeviceServiceID, SrcDeviceObjectID, DestDeviceObjectID, DeviceFilterSetID, FltFirstSeenTime, FltStartTime, FltEndTime, FltTimestamp, FltChangedCols, FltOrder, FltKey, FltAllowInd, FltRelevantInd, FltEnabledInd, FltEstablishedInd, FltArtificialInd, FltConfigText, FltProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device filters, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DestDeviceObjectID, DeviceFilterID, DeviceFilterSetID, DeviceID, DeviceServiceID, FltAllowInd, FltArtificialInd, FltChangedCols, FltConfigText, FltEnabledInd, FltEndTime, FltEstablishedInd, FltFirstSeenTime, FltKey, FltOrder, FltProvisionData, FltRelevantInd, FltStartTime, FltTimestamp, SrcDeviceObjectID.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter can be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filters: An array of the DeviceFilter objects that match the specified input criteria.
:rtype device_filters: Array of DeviceFilter
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available device filters matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DestDeviceObjectID, DeviceFilterID, DeviceFilterSetID, DeviceID, DeviceServiceID, FltAllowInd, FltArtificialInd, FltChangedCols, FltConfigText, FltEnabledInd, FltEndTime, FltEstablishedInd, FltFirstSeenTime, FltKey, FltOrder, FltProvisionData, FltRelevantInd, FltStartTime, FltTimestamp, SrcDeviceObjectID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DestDeviceObjectID: The operator to apply to the field DestDeviceObjectID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DestDeviceObjectID: The internal NetMRI identifier for the destination network object. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DestDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DestDeviceObjectID: If op_DestDeviceObjectID is specified, the field named in this input will be compared to the value in DestDeviceObjectID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DestDeviceObjectID must be specified if op_DestDeviceObjectID is specified.
:type val_f_DestDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DestDeviceObjectID: If op_DestDeviceObjectID is specified, this value will be compared to the value in DestDeviceObjectID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DestDeviceObjectID must be specified if op_DestDeviceObjectID is specified.
:type val_c_DestDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceFilterID: The operator to apply to the field DeviceFilterID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceFilterID: The internal NetMRI identifier for the rule. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceFilterID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceFilterID: If op_DeviceFilterID is specified, the field named in this input will be compared to the value in DeviceFilterID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceFilterID must be specified if op_DeviceFilterID is specified.
:type val_f_DeviceFilterID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceFilterID: If op_DeviceFilterID is specified, this value will be compared to the value in DeviceFilterID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceFilterID must be specified if op_DeviceFilterID is specified.
:type val_c_DeviceFilterID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceFilterSetID: The operator to apply to the field DeviceFilterSetID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceFilterSetID: The internal NetMRI identifier for the rule to which this rule belongs. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceFilterSetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceFilterSetID: If op_DeviceFilterSetID is specified, the field named in this input will be compared to the value in DeviceFilterSetID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceFilterSetID must be specified if op_DeviceFilterSetID is specified.
:type val_f_DeviceFilterSetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceFilterSetID: If op_DeviceFilterSetID is specified, this value will be compared to the value in DeviceFilterSetID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceFilterSetID must be specified if op_DeviceFilterSetID is specified.
:type val_c_DeviceFilterSetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device to which this rule belongs. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceServiceID: The operator to apply to the field DeviceServiceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceServiceID: The internal NetMRI identifier for the service (ports, protocol). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceServiceID: If op_DeviceServiceID is specified, the field named in this input will be compared to the value in DeviceServiceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceServiceID must be specified if op_DeviceServiceID is specified.
:type val_f_DeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceServiceID: If op_DeviceServiceID is specified, this value will be compared to the value in DeviceServiceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceServiceID must be specified if op_DeviceServiceID is specified.
:type val_c_DeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_FltAllowInd: The operator to apply to the field FltAllowInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltAllowInd: A flag indicating whether the rule allows the matched traffic. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltAllowInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltAllowInd: If op_FltAllowInd is specified, the field named in this input will be compared to the value in FltAllowInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltAllowInd must be specified if op_FltAllowInd is specified.
:type val_f_FltAllowInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltAllowInd: If op_FltAllowInd is specified, this value will be compared to the value in FltAllowInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltAllowInd must be specified if op_FltAllowInd is specified.
:type val_c_FltAllowInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltArtificialInd: The operator to apply to the field FltArtificialInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltArtificialInd: A flag indicating that this rule has no counterpart in the device configuration. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltArtificialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltArtificialInd: If op_FltArtificialInd is specified, the field named in this input will be compared to the value in FltArtificialInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltArtificialInd must be specified if op_FltArtificialInd is specified.
:type val_f_FltArtificialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltArtificialInd: If op_FltArtificialInd is specified, this value will be compared to the value in FltArtificialInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltArtificialInd must be specified if op_FltArtificialInd is specified.
:type val_c_FltArtificialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltChangedCols: The operator to apply to the field FltChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltChangedCols: If op_FltChangedCols is specified, the field named in this input will be compared to the value in FltChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltChangedCols must be specified if op_FltChangedCols is specified.
:type val_f_FltChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltChangedCols: If op_FltChangedCols is specified, this value will be compared to the value in FltChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltChangedCols must be specified if op_FltChangedCols is specified.
:type val_c_FltChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltConfigText: The operator to apply to the field FltConfigText. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltConfigText: The original text of the configuration from which this rule is built. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltConfigText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltConfigText: If op_FltConfigText is specified, the field named in this input will be compared to the value in FltConfigText using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltConfigText must be specified if op_FltConfigText is specified.
:type val_f_FltConfigText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltConfigText: If op_FltConfigText is specified, this value will be compared to the value in FltConfigText using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltConfigText must be specified if op_FltConfigText is specified.
:type val_c_FltConfigText: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltEnabledInd: The operator to apply to the field FltEnabledInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltEnabledInd: A flag indicating whether this rule is enabled in the configuration or not. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltEnabledInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltEnabledInd: If op_FltEnabledInd is specified, the field named in this input will be compared to the value in FltEnabledInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltEnabledInd must be specified if op_FltEnabledInd is specified.
:type val_f_FltEnabledInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltEnabledInd: If op_FltEnabledInd is specified, this value will be compared to the value in FltEnabledInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltEnabledInd must be specified if op_FltEnabledInd is specified.
:type val_c_FltEnabledInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltEndTime: The operator to apply to the field FltEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltEndTime: If op_FltEndTime is specified, the field named in this input will be compared to the value in FltEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltEndTime must be specified if op_FltEndTime is specified.
:type val_f_FltEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltEndTime: If op_FltEndTime is specified, this value will be compared to the value in FltEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltEndTime must be specified if op_FltEndTime is specified.
:type val_c_FltEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_FltEstablishedInd: The operator to apply to the field FltEstablishedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltEstablishedInd: A flag indicating whether this rule has the 'established' option (Cisco-specific). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltEstablishedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltEstablishedInd: If op_FltEstablishedInd is specified, the field named in this input will be compared to the value in FltEstablishedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltEstablishedInd must be specified if op_FltEstablishedInd is specified.
:type val_f_FltEstablishedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltEstablishedInd: If op_FltEstablishedInd is specified, this value will be compared to the value in FltEstablishedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltEstablishedInd must be specified if op_FltEstablishedInd is specified.
:type val_c_FltEstablishedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltFirstSeenTime: The operator to apply to the field FltFirstSeenTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltFirstSeenTime: The timestamp of when NetMRI first discovered this rule. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltFirstSeenTime: If op_FltFirstSeenTime is specified, the field named in this input will be compared to the value in FltFirstSeenTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltFirstSeenTime must be specified if op_FltFirstSeenTime is specified.
:type val_f_FltFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltFirstSeenTime: If op_FltFirstSeenTime is specified, this value will be compared to the value in FltFirstSeenTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltFirstSeenTime must be specified if op_FltFirstSeenTime is specified.
:type val_c_FltFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltKey: The operator to apply to the field FltKey. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltKey: The computed key value that identifies this rule against text configuration. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltKey: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltKey: If op_FltKey is specified, the field named in this input will be compared to the value in FltKey using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltKey must be specified if op_FltKey is specified.
:type val_f_FltKey: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltKey: If op_FltKey is specified, this value will be compared to the value in FltKey using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltKey must be specified if op_FltKey is specified.
:type val_c_FltKey: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltOrder: The operator to apply to the field FltOrder. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltOrder: The numerical order of this rule in its rule list. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltOrder: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltOrder: If op_FltOrder is specified, the field named in this input will be compared to the value in FltOrder using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltOrder must be specified if op_FltOrder is specified.
:type val_f_FltOrder: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltOrder: If op_FltOrder is specified, this value will be compared to the value in FltOrder using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltOrder must be specified if op_FltOrder is specified.
:type val_c_FltOrder: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltProvisionData: The operator to apply to the field FltProvisionData. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltProvisionData: Internal data - do not modify, may change without warning. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltProvisionData: If op_FltProvisionData is specified, the field named in this input will be compared to the value in FltProvisionData using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltProvisionData must be specified if op_FltProvisionData is specified.
:type val_f_FltProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltProvisionData: If op_FltProvisionData is specified, this value will be compared to the value in FltProvisionData using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltProvisionData must be specified if op_FltProvisionData is specified.
:type val_c_FltProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltRelevantInd: The operator to apply to the field FltRelevantInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltRelevantInd: A flag indicating whether this rule is relevant for search operations. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltRelevantInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltRelevantInd: If op_FltRelevantInd is specified, the field named in this input will be compared to the value in FltRelevantInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltRelevantInd must be specified if op_FltRelevantInd is specified.
:type val_f_FltRelevantInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltRelevantInd: If op_FltRelevantInd is specified, this value will be compared to the value in FltRelevantInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltRelevantInd must be specified if op_FltRelevantInd is specified.
:type val_c_FltRelevantInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltStartTime: The operator to apply to the field FltStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltStartTime: If op_FltStartTime is specified, the field named in this input will be compared to the value in FltStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltStartTime must be specified if op_FltStartTime is specified.
:type val_f_FltStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltStartTime: If op_FltStartTime is specified, this value will be compared to the value in FltStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltStartTime must be specified if op_FltStartTime is specified.
:type val_c_FltStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FltTimestamp: The operator to apply to the field FltTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FltTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FltTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FltTimestamp: If op_FltTimestamp is specified, the field named in this input will be compared to the value in FltTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FltTimestamp must be specified if op_FltTimestamp is specified.
:type val_f_FltTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FltTimestamp: If op_FltTimestamp is specified, this value will be compared to the value in FltTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FltTimestamp must be specified if op_FltTimestamp is specified.
:type val_c_FltTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SrcDeviceObjectID: The operator to apply to the field SrcDeviceObjectID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SrcDeviceObjectID: The internal NetMRI identifier for the source network object. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SrcDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SrcDeviceObjectID: If op_SrcDeviceObjectID is specified, the field named in this input will be compared to the value in SrcDeviceObjectID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SrcDeviceObjectID must be specified if op_SrcDeviceObjectID is specified.
:type val_f_SrcDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SrcDeviceObjectID: If op_SrcDeviceObjectID is specified, this value will be compared to the value in SrcDeviceObjectID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SrcDeviceObjectID must be specified if op_SrcDeviceObjectID is specified.
:type val_c_SrcDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device filters as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter methods. The listed methods will be called on each device filter returned and included in the output. Available methods are: device_service, src_device_object, dest_device_object, device_filter_set, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device_service, src_device_object, dest_device_object, device_filter_set, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit input for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceFilterID
:param sort: The data field(s) to use for sorting the output. Default is DeviceFilterID. Valid values are DeviceFilterID, DeviceID, DataSourceID, DeviceServiceID, SrcDeviceObjectID, DestDeviceObjectID, DeviceFilterSetID, FltFirstSeenTime, FltStartTime, FltEndTime, FltTimestamp, FltChangedCols, FltOrder, FltKey, FltAllowInd, FltRelevantInd, FltEnabledInd, FltEstablishedInd, FltArtificialInd, FltConfigText, FltProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceFilter. Valid values are DeviceFilterID, DeviceID, DataSourceID, DeviceServiceID, SrcDeviceObjectID, DestDeviceObjectID, DeviceFilterSetID, FltFirstSeenTime, FltStartTime, FltEndTime, FltTimestamp, FltChangedCols, FltOrder, FltKey, FltAllowInd, FltRelevantInd, FltEnabledInd, FltEstablishedInd, FltArtificialInd, FltConfigText, FltProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter can be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filters: An array of the DeviceFilter objects that match the specified input criteria.
:rtype device_filters: Array of DeviceFilter
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def src_device_object(self, **kwargs):
"""The network object for the source of the filtered traffic.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The network object for the source of the filtered traffic.
:rtype : DeviceObject
"""
return self.api_request(self._get_method_fullname("src_device_object"), kwargs)
def dest_device_object(self, **kwargs):
"""The network object for the destination of the filtered traffic.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The network object for the destination of the filtered traffic.
:rtype : DeviceObject
"""
return self.api_request(self._get_method_fullname("dest_device_object"), kwargs)
def device_service(self, **kwargs):
"""The service object of this rule
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :return : The service object of this rule.
:rtype : DeviceService
"""
return self.api_request(self._get_method_fullname("device_service"), kwargs)
def device_filter_set(self, **kwargs):
"""The rule list to which that rule belongs
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :return : The rule list to which this rule belongs.
:rtype : DeviceFilterSet
"""
return self.api_request(self._get_method_fullname("device_filter_set"), kwargs)
def device(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterID: The internal NetMRI identifier for the rule.
:type DeviceFilterID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
def describe(self, **kwargs):
"""Returns inquired information about this device rule based on description type.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param device_filter_id: The internal identifier of this device rule.
:type device_filter_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
            :param description_type: The device rule property to describe; one of: Source, Destination, Port, Protocol, Service, ConfigText.
:type description_type: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("describe"), kwargs)
| apache-2.0 |