repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
allenp/odoo | openerp/addons/test_workflow/models.py | 54 | 2259 | # -*- coding: utf-8 -*-
import logging
import openerp.osv.orm
_logger = logging.getLogger(__name__)
class m(openerp.osv.orm.Model):
    """ A model for which we will define a workflow (see data.xml). """
    _name = 'test.workflow.model'

    def print_(self, cr, uid, ids, s, context=None):
        # Every workflow activity funnels through here so activity runs are
        # logged uniformly.
        _logger.info('Running activity `%s` for record %s', s, ids)
        return True

    def print_a(self, cr, uid, ids, context=None):
        return self.print_(cr, uid, ids, 'a', context)

    def print_b(self, cr, uid, ids, context=None):
        return self.print_(cr, uid, ids, 'b', context)

    def print_c(self, cr, uid, ids, context=None):
        return self.print_(cr, uid, ids, 'c', context)

    def condition(self, cr, uid, ids, context=None):
        # Transition condition: true only when the (hard-coded, id=1) trigger
        # record holds a truthy value.
        trigger_records = self.pool['test.workflow.trigger'].browse(cr, uid, [1], context=context)
        return all(record.value for record in trigger_records)

    def trigger(self, cr, uid, context=None):
        return openerp.workflow.trg_trigger(uid, 'test.workflow.trigger', 1, cr)
class n(openerp.osv.orm.Model):
    """ A model used for the trigger feature. """
    _name = 'test.workflow.trigger'
    # Single boolean flag; m.condition() browses this model to decide whether
    # a workflow transition may fire.
    _columns = { 'value': openerp.osv.fields.boolean('Value') }
    _defaults = { 'value': False }
class a(openerp.osv.orm.Model):
    # Base test model carrying an integer 'value'; the models below inherit it.
    _name = 'test.workflow.model.a'
    _columns = { 'value': openerp.osv.fields.integer('Value') }
    _defaults = { 'value': 0 }
class b(openerp.osv.orm.Model):
    # Inherits the 'value' column and default from test.workflow.model.a.
    _name = 'test.workflow.model.b'
    _inherit = 'test.workflow.model.a'
class c(openerp.osv.orm.Model):
    # Inherits the 'value' column and default from test.workflow.model.a.
    _name = 'test.workflow.model.c'
    _inherit = 'test.workflow.model.a'
class d(openerp.osv.orm.Model):
    # Inherits the 'value' column and default from test.workflow.model.a.
    _name = 'test.workflow.model.d'
    _inherit = 'test.workflow.model.a'
class e(openerp.osv.orm.Model):
    # Inherits the 'value' column and default from test.workflow.model.a.
    _name = 'test.workflow.model.e'
    _inherit = 'test.workflow.model.a'
# Create one inheriting model per letter.  NOTE(review): 'b'..'e' are also
# defined explicitly above, so those names are registered twice — confirm the
# duplication is intentional.
for name in 'bcdefghijkl':
    #
    # Do not use type() to create the class here, but use the class construct.
    # This is because the __module__ of the new class would be the one of the
    # metaclass that provides method __new__!
    #
    class NewModel(openerp.osv.orm.Model):
        _name = 'test.workflow.model.%s' % name
        _inherit = 'test.workflow.model.a'
| gpl-3.0 |
Alwnikrotikz/paimei | pida/__init__.py | 1 | 12563 | #
# $Id$
#
# Public PIDA submodules; each is star-imported below so its names are
# re-exported from the package root.
__all__ = \
[
    "basic_block",
    "defines",
    "function",
    "instruction",
    "module",
]
from basic_block import *
from defines import *
from function import *
from instruction import *
from module import *
# we don't want to make wx a required module for PIDA.
try: import wx
except: pass
import cPickle
import zlib
import hashlib
import struct
########################################################################################################################
'''
These wrappers redirect to one of the dump/load routines. Currently the chosen vector is "custom". I left the other
approaches in this file mostly as a reminder of what *not* to do in the event that the need to improve the current
routine arises in the future.
'''
def dump (file_name, module, progress_bar=None):
    '''
    Dump the entire module structure to disk.  Thin wrapper that normalizes
    the progress bar selector and delegates to the "custom" storage vector.

    @type  file_name:    String
    @param file_name:    File name to export to
    @type  module:       pida.module
    @param module:       Module to export to disk
    @type  progress_bar: String
    @param progress_bar: (Optional, Def=None) Can be one of "wx", "ascii" or None

    @rtype:  Boolean
    @return: True on success, False otherwise.
    '''

    selector = progress_bar.lower() if progress_bar else progress_bar
    return dump_custom(file_name, module, selector)
def load (file_name, progress_bar=None):
    '''
    Restore a saved PIDA module from disk.  Thin wrapper that normalizes the
    progress bar selector and delegates to the "custom" storage vector.

    @type  file_name:    String
    @param file_name:    File name to import from
    @type  progress_bar: String
    @param progress_bar: (Optional, Def=None) Can be one of "wx", "ascii" or None

    @rtype:  Mixed
    @return: Imported module on success, 0 on cancel and -1 on failure.
    '''

    selector = progress_bar.lower() if progress_bar else progress_bar
    return load_custom(file_name, selector)
########################################################################################################################
def dump_custom (file_name, module, progress_bar=None):
    '''
    Dump the entire module structure to disk. This is done by first removing the large "nodes" attribute from the
    module. The modified module structure is then cPickle-ed, compressed and written to disk with a 4 byte size prefix.
    Next the number of nodes is calculated and written to disk as a 4 byte value. Finally, the "nodes" attribute is
    individually sliced and stored to disk through the above cPickle/compress method.

    @see: load_custom()

    @type  file_name:    String
    @param file_name:    File name to export to
    @type  module:       pida.module
    @param module:       Module to export to disk
    @type  progress_bar: String
    @param progress_bar: (Optional, Def=None) Can be one of "wx", "ascii" or None

    @rtype:  Boolean
    @return: True on success, False otherwise.
    '''

    fh = open(file_name, "wb+")

    # write the version signature to disk.
    # NOTE(review): PIDA_VERSION is presumably provided by the star import of
    # defines above — confirm.
    fh.write(struct.pack(">L", PIDA_VERSION))

    # remove the intra-object pointers and aliases; load_custom() rebuilds
    # them, and pickling them here would duplicate the whole graph.
    module.functions = None

    for func in module.nodes.values():
        func.module = None
        func.basic_blocks = None

        for bb in func.nodes.values():
            bb.function = None
            bb.nodes = None

            for ins in bb.instructions.values():
                ins.basic_block = None

    # detach the (possible large) nodes data structure from the module.
    nodes = module.nodes
    module.nodes = {}

    # write the "rest" of the data structure to disk, 4-byte length prefixed.
    rest = zlib.compress(cPickle.dumps(module, protocol=2))
    fh.write(struct.pack(">L", len(rest)))
    fh.write(rest)

    # write the node count to disk.
    num_nodes = len(nodes)
    count = 0
    percent_indicator = 0

    fh.write(struct.pack(">L", num_nodes))

    # create a progress bar.
    # NOTE(review): pb is only bound when progress_bar == "wx"; every later
    # pb.* call sits behind the same guard.
    if progress_bar == "wx":
        pb = wx.ProgressDialog("Storing %s to disk" % module.name,
                               "%d total nodes to process." % num_nodes,
                               maximum = num_nodes,
                               style = wx.PD_CAN_ABORT | wx.PD_AUTO_HIDE | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)

    # slice up the nodes attribute and individually store each node to disk.
    for entry in nodes.values():
        count += 1

        node = zlib.compress(cPickle.dumps(entry, protocol=2))
        fh.write(struct.pack(">L", len(node)))
        fh.write(node)

        if progress_bar == "wx":
            # a False return from Update() means the user pressed cancel.
            if not pb.Update(count):
                pb.Destroy()
                fh.close()
                return False

        elif progress_bar == "ascii":
            percent_complete = float(count) / float(num_nodes) * 100

            if percent_complete > 25 and percent_indicator == 0:
                print "25%",
                percent_indicator += 1
            elif percent_complete > 50 and percent_indicator == 1:
                print "50%",
                percent_indicator += 1
            elif percent_complete > 75 and percent_indicator == 2:
                print "75%",
                percent_indicator += 1

    # restore the nodes attribute.
    module.nodes = nodes

    if progress_bar == "wx":
        pb.Destroy()

    fh.close()
    return True
########################################################################################################################
def dump_orig (file_name, module, progress_bar=None):
    '''
    cPickle -> compress -> dump the entire module structure to disk. This was the original method used to store the
    PIDA data structure to disk. Unfortunately, for larger modules I was getting "out of memory" errors.

    @see: load_orig()

    @type  file_name: String
    @param file_name: File name to export to
    @type  module:    pida.module
    @param module:    Module to export to disk
    '''

    # serialize the whole module in one shot, compress, then write it out.
    out = open(file_name, "wb+")
    out.write(zlib.compress(cPickle.dumps(module, protocol=2)))
    out.close()
########################################################################################################################
def dump_shelve (file_name, module, progress_bar=None):
    '''
    Store a PIDA module to disk as a shelve database, one top-level key per
    attribute, so the whole module never has to be pickled in one piece.

    @see: load_shelve()

    @type  file_name: String
    @param file_name: File name to export to
    @type  module:    pida.module
    @param module:    Module to export to disk
    '''

    import os
    import shelve

    # remove any stale database first.  bug fix: the original called
    # os.unlink() unconditionally and raised OSError when no previous
    # database existed.
    try:
        os.unlink(file_name)
    except OSError:
        pass

    sh = shelve.open(file_name, flag='n', writeback=True, protocol=2)

    # attributes from pida.module
    sh["name"]      = module.name
    sh["base"]      = module.base
    sh["depth"]     = module.depth
    sh["analysis"]  = module.analysis
    sh["signature"] = module.signature
    sh["version"]   = module.version
    sh["ext"]       = module.ext

    # attributes inherited from pgraph.graph
    sh["id"]       = module.id
    sh["clusters"] = module.clusters
    sh["edges"]    = module.edges

    sh["nodes"] = {}

    # we store the node dictionary piece by piece to avoid out of memory
    # conditions; writeback=True lets the in-place mutation persist on sync().
    for key, val in module.nodes.items():
        sh["nodes"][key] = val

    sh.sync()
    sh.close()
########################################################################################################################
def load_custom (file_name, progress_bar=None):
    '''
    Restore a saved PIDA module from disk.

    @see: dump_custom()

    @type  file_name: String
    @param file_name: File name to import from

    @rtype:  Mixed
    @return: Imported module on success, 0 on cancel and -1 on failure.
    '''

    fh = open(file_name, "rb")

    # read and verify the version signature from disk.
    version = int(struct.unpack(">L", fh.read(4))[0])

    if version != PIDA_VERSION:
        return -1

    # read the "rest" of the data structure from disk: a 4-byte length prefix
    # followed by a zlib-compressed pickle, mirroring dump_custom().
    length = int(struct.unpack(">L", fh.read(4))[0])
    data = fh.read(length)

    module = cPickle.loads(zlib.decompress(data))

    # read the node count from disk.
    num_nodes = int(struct.unpack(">L", fh.read(4))[0])
    count = 0
    percent_indicator = 0

    # create a progress bar.
    # NOTE(review): pb is only bound when progress_bar == "wx"; every later
    # pb.* call sits behind the same guard.
    if progress_bar == "wx":
        pb = wx.ProgressDialog("Loading %s from disk" % module.name,
                               "%d total nodes to process." % num_nodes,
                               maximum = num_nodes,
                               style = wx.PD_CAN_ABORT | wx.PD_AUTO_HIDE | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)

    # read each individual node structure from disk and merge it into the module.nodes dictionary.
    while 1:
        try:
            count += 1
            length = int(struct.unpack(">L", fh.read(4))[0])
        except:
            # EOF reached
            break

        data = fh.read(length)
        node = cPickle.loads(zlib.decompress(data))

        module.nodes[node.id] = node

        if progress_bar == "wx":
            # a False return from Update() means the user pressed cancel.
            if not pb.Update(count):
                pb.Destroy()
                fh.close()
                return 0

        elif progress_bar == "ascii":
            percent_complete = float(count) / float(num_nodes) * 100

            if percent_complete > 25 and percent_indicator == 0:
                print "25%",
                percent_indicator += 1
            elif percent_complete > 50 and percent_indicator == 1:
                print "50%",
                percent_indicator += 1
            elif percent_complete > 75 and percent_indicator == 2:
                print "75%",
                percent_indicator += 1

    if progress_bar == "wx":
        pb.Destroy()

    # restore the intra-object pointers and aliases stripped by dump_custom().
    module.functions = module.nodes

    for func in module.nodes.values():
        func.module = module
        func.basic_blocks = func.nodes

        for bb in func.nodes.values():
            bb.function = func
            bb.nodes = bb.instructions

            for ins in bb.instructions.values():
                ins.basic_block = bb

    fh.close()
    return module
########################################################################################################################
def load_orig (file_name, progress_bar=None):
    '''
    Restore a module that was stored with the original single-blob vector.

    @see: dump_orig()

    @type  file_name: String
    @param file_name: File name to import from

    @rtype:  pida.module
    @return: Imported module
    '''

    fh = open(file_name, "rb")
    raw = fh.read()
    fh.close()

    # decompress, then un-pickle the whole module in one shot.
    return cPickle.loads(zlib.decompress(raw))
########################################################################################################################
def load_shelve (file_name, progress_bar=None):
    '''
    Load a module from disk.

    @see: dump_shelve()

    @type  file_name: String
    @param file_name: File name to import from

    @rtype:  pida.module
    @return: Imported module
    '''

    import shelve

    sh = shelve.open(file_name, flag='r', protocol=2)
    mod = module()

    # attributes from pida.module
    mod.name      = sh["name"]
    mod.base      = sh["base"]
    mod.depth     = sh["depth"]
    mod.analysis  = sh["analysis"]
    mod.signature = sh["signature"]
    mod.version   = sh["version"]
    mod.ext       = sh["ext"]

    # attributes inherited from pgraph.graph
    mod.id       = sh["id"]
    mod.clusters = sh["clusters"]
    mod.edges    = sh["edges"]

    mod.nodes = {}

    # we restore the node dictionary piece by piece to avoid out of memory conditions.
    for key, val in sh["nodes"].items():
        mod.nodes[key] = val

    # bug fix: close the shelve handle (it was leaked) and return the module —
    # the original ended with a bare `return`, yielding None despite the
    # documented contract above.
    sh.close()
    return mod
########################################################################################################################
def signature (file_name):
    '''
    Create and return a signature (hash) for the specified file.

    @todo: Look into replacing this with something faster.

    @type  file_name: String
    @param file_name: File name to hash

    @rtype:  String
    @return: 32 character MD5 hex string
    '''

    try:
        fh = open(file_name, "rb")
    except IOError:
        # legacy fallback: retry the same path rooted on the C: drive.
        fh = open("c:" + file_name, "rb")

    # bug fix: the original read the whole file into memory and never closed
    # the handle.  Hash in fixed-size chunks and always release the handle.
    try:
        digest = hashlib.md5()
        for chunk in iter(lambda: fh.read(65536), b""):
            digest.update(chunk)
    finally:
        fh.close()

    return digest.hexdigest()
| gpl-2.0 |
# Unidecode transliterations for one 256-entry Hangul page: every entry is
# 'd' + medial vowel + optional final consonant, in strict jamo order.  The
# table is fully regular, so generate it instead of spelling out 256 strings.
_FINALS = (
    '', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd', 'l', 'lg', 'lm', 'lb', 'ls',
    'lt', 'lp', 'lh', 'm', 'b', 'bs', 's', 'ss', 'ng', 'j', 'c', 'k', 't',
    'p', 'h',
)
_VOWELS = ('ae', 'ya', 'yae', 'eo', 'e', 'yeo', 'ye', 'o', 'wa', 'wae')

# 10 vowels x 28 finals = 280 syllables; the page holds only the first 256,
# so the last vowel row ('wae') is truncated after its first four finals.
data = tuple('d' + v + f for v in _VOWELS for f in _FINALS)[:256]
| mit |
837468220/python-for-android | python3-alpha/python3-src/Lib/encodings/cp852.py | 272 | 35002 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions delegate to the C-level charmap helpers.
    # NOTE(review): encoding_map is not visible in this chunk — presumably
    # built later in the file from decoding_table; confirm.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # No state is kept between calls; each chunk is charmap-encoded directly.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # No state is kept between calls; each chunk is charmap-decoded directly.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; StreamWriter supplies the stream plumbing.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; StreamReader supplies the stream plumbing.
    pass
### encodings module API
def getregentry():
    # Entry point called by the encodings package to register the 'cp852'
    # codec; bundles all the classes defined above into one CodecInfo record.
    return codecs.CodecInfo(
        name='cp852',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# CP852 differs from Latin-1 only in the high half.  _CP852_HIGH lists the
# Unicode ordinal for each byte 0x80-0xFF, in byte order, so the map can be
# built by overlaying these 128 entries on the identity mapping.
_CP852_HIGH = (
    0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x016f, 0x0107, 0x00e7,  # 0x80-0x87
    0x0142, 0x00eb, 0x0150, 0x0151, 0x00ee, 0x0179, 0x00c4, 0x0106,  # 0x88-0x8f
    0x00c9, 0x0139, 0x013a, 0x00f4, 0x00f6, 0x013d, 0x013e, 0x015a,  # 0x90-0x97
    0x015b, 0x00d6, 0x00dc, 0x0164, 0x0165, 0x0141, 0x00d7, 0x010d,  # 0x98-0x9f
    0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x0104, 0x0105, 0x017d, 0x017e,  # 0xa0-0xa7
    0x0118, 0x0119, 0x00ac, 0x017a, 0x010c, 0x015f, 0x00ab, 0x00bb,  # 0xa8-0xaf
    0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x00c1, 0x00c2, 0x011a,  # 0xb0-0xb7
    0x015e, 0x2563, 0x2551, 0x2557, 0x255d, 0x017b, 0x017c, 0x2510,  # 0xb8-0xbf
    0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x0102, 0x0103,  # 0xc0-0xc7
    0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x00a4,  # 0xc8-0xcf
    0x0111, 0x0110, 0x010e, 0x00cb, 0x010f, 0x0147, 0x00cd, 0x00ce,  # 0xd0-0xd7
    0x011b, 0x2518, 0x250c, 0x2588, 0x2584, 0x0162, 0x016e, 0x2580,  # 0xd8-0xdf
    0x00d3, 0x00df, 0x00d4, 0x0143, 0x0144, 0x0148, 0x0160, 0x0161,  # 0xe0-0xe7
    0x0154, 0x00da, 0x0155, 0x0170, 0x00fd, 0x00dd, 0x0163, 0x00b4,  # 0xe8-0xef
    0x00ad, 0x02dd, 0x02db, 0x02c7, 0x02d8, 0x00a7, 0x00f7, 0x00b8,  # 0xf0-0xf7
    0x00b0, 0x00a8, 0x02d9, 0x0171, 0x0158, 0x0159, 0x25a0, 0x00a0,  # 0xf8-0xff
)

decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({0x80 + offset: cp for offset, cp in enumerate(_CP852_HIGH)})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
'\xac' # 0x00aa -> NOT SIGN
'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
'\u02db' # 0x00f2 -> OGONEK
'\u02c7' # 0x00f3 -> CARON
'\u02d8' # 0x00f4 -> BREVE
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u02d9' # 0x00fa -> DOT ABOVE
'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Inverse of the decoding table: Unicode code point -> single byte of this
# PC code page (the repertoire — Polish/Czech letters plus box drawing —
# looks like cp852; confirm against the module header).
# The ASCII range 0x00-0x7F maps to itself, so build it programmatically.
encoding_map = {code_point: code_point for code_point in range(0x80)}
# Code points above ASCII map back to the non-trivial byte values.
encoding_map.update({
    0x00a0: 0x00ff,
    0x00a4: 0x00cf,
    0x00a7: 0x00f5,
    0x00a8: 0x00f9,
    0x00ab: 0x00ae,
    0x00ac: 0x00aa,
    0x00ad: 0x00f0,
    0x00b0: 0x00f8,
    0x00b4: 0x00ef,
    0x00b8: 0x00f7,
    0x00bb: 0x00af,
    0x00c1: 0x00b5,
    0x00c2: 0x00b6,
    0x00c4: 0x008e,
    0x00c7: 0x0080,
    0x00c9: 0x0090,
    0x00cb: 0x00d3,
    0x00cd: 0x00d6,
    0x00ce: 0x00d7,
    0x00d3: 0x00e0,
    0x00d4: 0x00e2,
    0x00d6: 0x0099,
    0x00d7: 0x009e,
    0x00da: 0x00e9,
    0x00dc: 0x009a,
    0x00dd: 0x00ed,
    0x00df: 0x00e1,
    0x00e1: 0x00a0,
    0x00e2: 0x0083,
    0x00e4: 0x0084,
    0x00e7: 0x0087,
    0x00e9: 0x0082,
    0x00eb: 0x0089,
    0x00ed: 0x00a1,
    0x00ee: 0x008c,
    0x00f3: 0x00a2,
    0x00f4: 0x0093,
    0x00f6: 0x0094,
    0x00f7: 0x00f6,
    0x00fa: 0x00a3,
    0x00fc: 0x0081,
    0x00fd: 0x00ec,
    0x0102: 0x00c6,
    0x0103: 0x00c7,
    0x0104: 0x00a4,
    0x0105: 0x00a5,
    0x0106: 0x008f,
    0x0107: 0x0086,
    0x010c: 0x00ac,
    0x010d: 0x009f,
    0x010e: 0x00d2,
    0x010f: 0x00d4,
    0x0110: 0x00d1,
    0x0111: 0x00d0,
    0x0118: 0x00a8,
    0x0119: 0x00a9,
    0x011a: 0x00b7,
    0x011b: 0x00d8,
    0x0139: 0x0091,
    0x013a: 0x0092,
    0x013d: 0x0095,
    0x013e: 0x0096,
    0x0141: 0x009d,
    0x0142: 0x0088,
    0x0143: 0x00e3,
    0x0144: 0x00e4,
    0x0147: 0x00d5,
    0x0148: 0x00e5,
    0x0150: 0x008a,
    0x0151: 0x008b,
    0x0154: 0x00e8,
    0x0155: 0x00ea,
    0x0158: 0x00fc,
    0x0159: 0x00fd,
    0x015a: 0x0097,
    0x015b: 0x0098,
    0x015e: 0x00b8,
    0x015f: 0x00ad,
    0x0160: 0x00e6,
    0x0161: 0x00e7,
    0x0162: 0x00dd,
    0x0163: 0x00ee,
    0x0164: 0x009b,
    0x0165: 0x009c,
    0x016e: 0x00de,
    0x016f: 0x0085,
    0x0170: 0x00eb,
    0x0171: 0x00fb,
    0x0179: 0x008d,
    0x017a: 0x00ab,
    0x017b: 0x00bd,
    0x017c: 0x00be,
    0x017d: 0x00a6,
    0x017e: 0x00a7,
    0x02c7: 0x00f3,
    0x02d8: 0x00f4,
    0x02d9: 0x00fa,
    0x02db: 0x00f2,
    0x02dd: 0x00f1,
    0x2500: 0x00c4,
    0x2502: 0x00b3,
    0x250c: 0x00da,
    0x2510: 0x00bf,
    0x2514: 0x00c0,
    0x2518: 0x00d9,
    0x251c: 0x00c3,
    0x2524: 0x00b4,
    0x252c: 0x00c2,
    0x2534: 0x00c1,
    0x253c: 0x00c5,
    0x2550: 0x00cd,
    0x2551: 0x00ba,
    0x2554: 0x00c9,
    0x2557: 0x00bb,
    0x255a: 0x00c8,
    0x255d: 0x00bc,
    0x2560: 0x00cc,
    0x2563: 0x00b9,
    0x2566: 0x00cb,
    0x2569: 0x00ca,
    0x256c: 0x00ce,
    0x2580: 0x00df,
    0x2584: 0x00dc,
    0x2588: 0x00db,
    0x2591: 0x00b0,
    0x2592: 0x00b1,
    0x2593: 0x00b2,
    0x25a0: 0x00fe,
})
| apache-2.0 |
aldian/tensorflow | tensorflow/contrib/boosted_trees/python/utils/losses_test.py | 33 | 3862 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trainer hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class LossesTest(test_util.TensorFlowTestCase):
  """Tests for the boosted-trees per-example loss functions."""

  def test_per_example_exp_loss(self):
    """Checks saturation and the linear ramp of the exponential loss."""

    def _to_logit(prob):
      # Inverse sigmoid: log(p / (1 - p)).
      return np.log(prob) - np.log(1 - prob)

    probabilities = np.array(
        [[0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7], [0.8], [0.9],
         [0.99]],
        dtype=np.float32)
    logits = _to_logit(probabilities)
    eps = 0.2
    positive_labels = array_ops.ones([10, 1], dtypes.float32)
    negative_labels = array_ops.zeros([10, 1], dtypes.float32)
    unit_weights = array_ops.ones([10, 1], dtypes.float32)
    with self.test_session():
      logits_tensor = constant_op.constant(logits, dtype=dtypes.float32)
      pos_loss_tensor, _ = losses.per_example_exp_loss(
          positive_labels, unit_weights, logits_tensor, eps=eps)
      neg_loss_tensor, _ = losses.per_example_exp_loss(
          negative_labels, unit_weights, logits_tensor, eps=eps)
      pos_loss = pos_loss_tensor.eval()
      neg_loss = neg_loss_tensor.eval()
      # Probabilities <= 0.3 saturate: positive labels get the maximum loss
      # of e, negative labels the minimum loss of 1/e.
      self.assertAllClose(np.exp(np.ones([2, 1])), pos_loss[:2], atol=1e-4)
      self.assertAllClose(np.exp(-np.ones([2, 1])), neg_loss[:2], atol=1e-4)
      # Probabilities >= 0.7 saturate the other way: positive labels get the
      # minimum loss of 1/e, negative labels are misclassified and get e.
      self.assertAllClose(np.exp(-np.ones([4, 1])), pos_loss[6:10], atol=1e-4)
      self.assertAllClose(np.exp(np.ones([4, 1])), neg_loss[6:10], atol=1e-4)
      # Inside (0.5 - eps, 0.5 + eps) the loss is exp(-label_m * y) with
      # y = x / eps - 0.5 / eps, where x is the predicted probability and
      # label_m is +1 for a positive label and -1 for a negative one.
      ramp = probabilities[2:6] * 1.0 / eps - 0.5 / eps
      self.assertAllClose(np.exp(-ramp), pos_loss[2:6], atol=1e-4)
      self.assertAllClose(np.exp(ramp), neg_loss[2:6], atol=1e-4)

  def test_per_example_squared_loss(self):
    """Squared loss equals (label - prediction)^2 elementwise."""
    labels = np.array([[0.123], [224.2], [-3], [2], [.3]], dtype=np.float32)
    predictions = np.array(
        [[0.123], [23.2], [233], [52], [3]], dtype=np.float32)
    unit_weights = array_ops.ones([5, 1], dtypes.float32)
    with self.test_session():
      loss_tensor, _ = losses.per_example_squared_loss(labels, unit_weights,
                                                       predictions)
      loss = loss_tensor.eval()
      self.assertAllClose(
          np.square(labels[:5] - predictions[:5]), loss[:5], atol=1e-4)
# Standard TensorFlow test entry point: runs every test in this module when
# the file is executed directly.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
jacques/joyent-connector | test/feedvalidator/feedvalidator/base.py | 2 | 16608 | """$Id: base.py 624 2006-05-20 23:42:36Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 624 $"
__date__ = "$Date: 2006-05-21 11:42:36 +1200 (Sun, 21 May 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from xml.sax.handler import ContentHandler
from xml.sax.xmlreader import Locator
from logging import NonCanonicalURI, NotUTF8
import re
# references:
# http://web.resource.org/rss/1.0/modules/standard.html
# http://web.resource.org/rss/1.0/modules/proposed.html
# http://dmoz.org/Reference/Libraries/Library_and_Information_Science/Technical_Services/Cataloguing/Metadata/RDF/Applications/RSS/Specifications/RSS1.0_Modules/
# Canonical namespace URI -> preferred prefix for every feed vocabulary the
# validator recognizes.  Keys must match the URI exactly; fuzzy lookups go
# through nearly_namespaces (built from this table further down).
namespaces = {
  "http://webns.net/mvcb/": "admin",
  "http://purl.org/rss/1.0/modules/aggregation/": "ag",
  "http://purl.org/rss/1.0/modules/annotate/": "annotate",
  "http://media.tangent.org/rss/1.0/": "audio",
  "http://backend.userland.com/blogChannelModule": "blogChannel",
  "http://web.resource.org/cc/": "cc",
  "http://www.microsoft.com/schemas/rss/core/2005": "cf",
  "http://backend.userland.com/creativeCommonsRssModule": "creativeCommons",
  "http://purl.org/rss/1.0/modules/company": "company",
  "http://purl.org/rss/1.0/modules/content/": "content",
  "http://my.theinfo.org/changed/1.0/rss/": "cp",
  "http://purl.org/dc/elements/1.1/": "dc",
  "http://purl.org/dc/terms/": "dcterms",
  "http://purl.org/rss/1.0/modules/email/": "email",
  "http://purl.org/rss/1.0/modules/event/": "ev",
  "http://www.w3.org/2003/01/geo/wgs84_pos#": "geo",
  "http://geourl.org/rss/module/": "geourl",
  "http://postneo.com/icbm": "icbm",
  "http://purl.org/rss/1.0/modules/image/": "image",
  "http://www.itunes.com/dtds/podcast-1.0.dtd": "itunes",
  "http://xmlns.com/foaf/0.1/": "foaf",
  "http://purl.org/rss/1.0/modules/link/": "l",
  "http://search.yahoo.com/mrss/": "media",
  "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
  "http://www.w3.org/2000/01/rdf-schema#": "rdfs",
  "http://purl.org/rss/1.0/modules/reference/": "ref",
  "http://purl.org/rss/1.0/modules/richequiv/": "reqv",
  "http://purl.org/rss/1.0/modules/rss091#": "rss091",
  "http://purl.org/rss/1.0/modules/search/": "search",
  "http://purl.org/rss/1.0/modules/slash/": "slash",
  "http://purl.org/rss/1.0/modules/servicestatus/": "ss",
  "http://hacks.benhammersley.com/rss/streaming/": "str",
  "http://purl.org/rss/1.0/modules/subscription/": "sub",
  "http://purl.org/rss/1.0/modules/syndication/": "sy",
  "http://purl.org/rss/1.0/modules/taxonomy/": "taxo",
  # NOTE: two URIs map to the "thr" prefix (RSS 1.0 threading module and
  # Atom threading); likewise "wiki" appears twice below.
  "http://purl.org/rss/1.0/modules/threading/": "thr",
  "http://purl.org/syndication/thread/1.0": "thr",
  "http://madskills.com/public/xml/rss/module/trackback/": "trackback",
  "http://wellformedweb.org/CommentAPI/": "wfw",
  "http://purl.org/rss/1.0/modules/wiki/": "wiki",
  "http://www.usemod.com/cgi-bin/mb.pl?ModWiki": "wiki",
  "http://schemas.xmlsoap.org/soap/envelope/": "soap",
  "http://www.w3.org/2005/Atom": "atom",
  "http://www.w3.org/1999/xhtml": "xhtml",
  "http://my.netscape.com/rdf/simple/0.9/": "rss090",
  "http://purl.org/net/rss1.1#": "rss11",
  "http://base.google.com/ns/1.0": "g",
}
def near_miss(ns):
  """Normalize a namespace URI for tolerant comparison.

  Drops any trailing non-word characters (e.g. '/' or '#') and lowercases
  the rest, so that slightly mistyped URIs still match a known namespace.

  Args:
    ns: the namespace URI; may be None or a non-string, in which case it is
        returned unchanged.

  Returns:
    The normalized string, or ns itself when it cannot be normalized.
  """
  try:
    return re.match(r".*\w", ns).group().lower()
  except (TypeError, AttributeError):
    # TypeError: ns is not a string (callers pass None qnames through here).
    # AttributeError: ns contains no word character, so re.match gave None.
    # Narrowed from a bare `except:` that hid unrelated failures.
    return ns
# Normalized (near_miss'd) namespace URI -> preferred prefix, used to match
# URIs that differ from the canonical form only in case or trailing
# punctuation.
nearly_namespaces = dict([(near_miss(u),p) for u,p in namespaces.items()])

# The three attributes from the xml: namespace that may appear on any
# element without being declared by the element's own vocabulary.
stdattrs = [(u'http://www.w3.org/XML/1998/namespace', u'base'),
            (u'http://www.w3.org/XML/1998/namespace', u'lang'),
            (u'http://www.w3.org/XML/1998/namespace', u'space')]
#
# From the SAX parser's point of view, this class is the one responsible for
# handling SAX events. In actuality, all this class does is maintain a
# pushdown stack of the *real* content handlers, and delegates sax events
# to the current one.
#
class SAXDispatcher(ContentHandler):
  """Top-level SAX handler that delegates to a stack of element handlers.

  Maintains a pushdown stack (handler_stack) of lists of validatorBase
  instances; every SAX event is forwarded to each handler on the top of the
  stack.  Also owns the list of logged validation events (loggedEvents) and
  the last known parse position.
  """
  # When truthy, identical events are collapsed into one entry with a
  # msgcount instead of being logged repeatedly.
  firstOccurrenceOnly = 0
  def __init__(self, base, selfURIs, encoding):
    from root import root
    ContentHandler.__init__(self)
    self.lastKnownLine = 1
    self.lastKnownColumn = 0
    self.loggedEvents = []
    self.feedType = 0
    # NOTE(review): bare except — falls back to the raw base URI whenever
    # IDNA encoding fails (non-ASCII or non-encodable hosts).
    try:
      self.xmlBase = base.encode('idna')
    except:
      self.xmlBase = base
    self.selfURIs = selfURIs
    self.encoding = encoding
    # Stack starts with the document-level `root` handler.
    self.handler_stack=[[root(self, base)]]
    # Reset class-level state shared by all validatorBase subclasses.
    validatorBase.defaultNamespaces = []
  def setDocumentLocator(self, locator):
    """Remember the SAX locator so events can be tagged with line/column."""
    self.locator = locator
    ContentHandler.setDocumentLocator(self, self.locator)
  def setFirstOccurrenceOnly(self, firstOccurrenceOnly=1):
    """Enable/disable collapsing of duplicate logged events."""
    self.firstOccurrenceOnly = firstOccurrenceOnly
  def startPrefixMapping(self, prefix, uri):
    """Log non-standard, obsolete, or reserved namespace prefix usage."""
    if namespaces.has_key(uri):
      if not namespaces[uri] == prefix and prefix:
        from logging import NonstdPrefix
        self.log(NonstdPrefix({'preferred':namespaces[uri], 'ns':uri}))
    elif prefix=='wiki' and uri.find('usemod')>=0:
      # NOTE(review): in this branch uri is NOT in namespaces (the first
      # test failed), so namespaces[uri] below would raise KeyError for any
      # usemod-like URI that isn't the canonical one — confirm upstream.
      from logging import ObsoleteWikiNamespace
      self.log(ObsoleteWikiNamespace({'preferred':namespaces[uri], 'ns':uri}))
    elif prefix in namespaces.values():
      from logging import ReservedPrefix
      preferredURI = [key for key, value in namespaces.items() if value == prefix][0]
      self.log(ReservedPrefix({'prefix':prefix, 'ns':preferredURI}))
  def startElementNS(self, name, qname, attrs):
    """Forward element start to current handlers; flag unexpected attrs."""
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    # SAX passes name as a (namespace, localname) tuple; unpack it.
    qname, name = name
    for handler in iter(self.handler_stack[-1]):
      handler.startElementNS(name, qname, attrs)
    if len(attrs):
      present = attrs.getNames()
      # Start from all attributes except the always-allowed xml: ones, then
      # let every active handler whittle the list down to what it expects.
      unexpected = filter(lambda x: x not in stdattrs, present)
      for handler in iter(self.handler_stack[-1]):
        ean = handler.getExpectedAttrNames()
        if ean: unexpected = filter(lambda x: x not in ean, unexpected)
      for u in unexpected:
        # Only report attributes from known (near-miss-matching) namespaces.
        if u[0] and near_miss(u[0]) not in nearly_namespaces: continue
        from logging import UnexpectedAttribute
        if not u[0]: u=u[1]
        self.log(UnexpectedAttribute({"parent":name, "attribute":u, "element":name}))
  def resolveEntity(self, publicId, systemId):
    """Refuse external entities: log, and hand the parser an empty stream."""
    if not publicId and not systemId:
      import cStringIO
      return cStringIO.StringIO()
    # Flush any pending XML-validation callback exactly once.
    # NOTE(review): self.xmlvalidator is set externally (not in this class);
    # the bare except silently skips this step when it is absent.
    try:
      def log(exception):
        from logging import SAXError
        self.log(SAXError({'exception':str(exception)}))
      if self.xmlvalidator:
        self.xmlvalidator(log)
      self.xmlvalidator=0
    except:
      pass
    if (publicId=='-//Netscape Communications//DTD RSS 0.91//EN' and
        systemId=='http://my.netscape.com/publish/formats/rss-0.91.dtd'):
      # The one known-good DOCTYPE (RSS 0.91) is allowed.
      from logging import ValidDoctype
      self.log(ValidDoctype({}))
    else:
      from logging import ContainsSystemEntity
      self.lastKnownLine = self.locator.getLineNumber()
      self.lastKnownColumn = self.locator.getColumnNumber()
      self.log(ContainsSystemEntity({}))
    from StringIO import StringIO
    return StringIO()
  def skippedEntity(self, name):
    """Log entities the parser skipped, unless a valid DOCTYPE defines them."""
    from logging import ValidDoctype
    if [e for e in self.loggedEvents if e.__class__ == ValidDoctype]:
      # With the RSS 0.91 DOCTYPE accepted, HTML named entities are legal.
      from htmlentitydefs import name2codepoint
      if name in name2codepoint: return
    from logging import UndefinedNamedEntity
    self.log(UndefinedNamedEntity({'value':name}))
  def characters(self, string):
    """Forward character data to every handler on top of the stack."""
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    for handler in iter(self.handler_stack[-1]):
      handler.characters(string)
  def endElementNS(self, name, qname):
    """Forward element end to current handlers, then pop the stack frame."""
    self.lastKnownLine = self.locator.getLineNumber()
    self.lastKnownColumn = self.locator.getColumnNumber()
    qname, name = name
    for handler in iter(self.handler_stack[-1]):
      handler.endElementNS(name, qname)
    del self.handler_stack[-1]
  def push(self, handlers, name, attrs, parent):
    """Initialize handler(s) for a new element and push them on the stack.

    Accepts either an iterable of handlers or a single handler (the except
    path normalizes the latter into a one-element list).
    """
    try:
      for handler in iter(handlers):
        handler.setElement(name, attrs, parent)
        handler.value=""
        handler.prevalidate()
    except:
      handlers.setElement(name, attrs, parent)
      handlers.value=""
      handlers.prevalidate()
      handlers = [handlers]
    self.handler_stack.append(handlers)
  def log(self, event, offset=(0,0)):
    """Record a validation event, annotated with its parse position.

    Duplicate events are collapsed (msgcount incremented) when
    firstOccurrenceOnly is set; offset shifts the reported line/column.
    """
    def findDuplicate(self, event):
      # An event duplicates a prior one if all params except 'value' match
      # (NonCanonicalURI events are considered duplicates by class alone).
      duplicates = [e for e in self.loggedEvents if e.__class__ == event.__class__]
      if duplicates and (event.__class__ in [NonCanonicalURI]):
        return duplicates[0]
      for dup in duplicates:
        for k, v in event.params.items():
          if k != 'value':
            if not k in dup.params or dup.params[k] != v: break
        else:
          return dup
    if event.params.has_key('element') and event.params['element']:
      # Normalize the element name for display: prefix_local -> prefix:local,
      # and (xml-namespace, local) tuples -> xml:local.
      if not isinstance(event.params['element'],tuple):
        event.params['element']=':'.join(event.params['element'].split('_', 1))
      elif event.params['element'][0]==u'http://www.w3.org/XML/1998/namespace':
        event.params['element'] = 'xml:' + event.params['element'][-1]
    if self.firstOccurrenceOnly:
      dup = findDuplicate(self, event)
      if dup:
        dup.params['msgcount'] = dup.params['msgcount'] + 1
        return
    event.params['msgcount'] = 1
    # Attach current and last-known positions; fall back to 1 when no
    # locator is available (e.g. events logged outside a parse).
    try:
      line = self.locator.getLineNumber() + offset[0]
      backupline = self.lastKnownLine
      column = (self.locator.getColumnNumber() or 0) + offset[1]
      backupcolumn = self.lastKnownColumn
    except AttributeError:
      line = backupline = column = backupcolumn = 1
    event.params['line'] = line
    event.params['backupline'] = backupline
    event.params['column'] = column
    event.params['backupcolumn'] = backupcolumn
    self.loggedEvents.append(event)
  def error(self, exception):
    """Log a SAX error and re-raise it to abort parsing."""
    from logging import SAXError
    self.log(SAXError({'exception':str(exception)}))
    raise exception
  # Fatal errors and warnings are treated identically to errors.
  fatalError=error
  warning=error
  def getFeedType(self):
    """Return the detected feed type (0 until setFeedType is called)."""
    return self.feedType
  def setFeedType(self, feedType):
    """Record the detected feed type (e.g. TYPE_RSS2)."""
    self.feedType = feedType
#
# This base class for content handlers keeps track of such administrative
# details as the parent of the current element, and delegating both log
# and push events back up the stack. It will also concatenate up all of
# the SAX events associated with character data into a value, handing such
# things as CDATA and entities.
#
# Subclasses are expected to declare "do_name" methods for every
# element that they support. These methods are expected to return the
# appropriate handler for the element.
#
# The name of the element and the names of the children processed so
# far are also maintained.
#
# Hooks are also provided for subclasses to do "prevalidation" and
# "validation".
#
from logging import TYPE_RSS2
class validatorBase(ContentHandler):
  """Base class for per-element content handlers.

  Tracks the element's name, attributes, parent and accumulated text
  (self.value), resolves xml:base/xml:lang inheritance, and delegates log
  and push events up through self.dispatcher.  Subclasses declare do_<name>
  methods returning the handler for each supported child element, and may
  override the prevalidate()/validate() hooks.
  """
  # Namespace URIs treated as the default (un-prefixed) vocabulary; shared
  # class-level state, reset by SAXDispatcher.__init__.
  defaultNamespaces = []
  def __init__(self):
    ContentHandler.__init__(self)
    self.value = ""        # accumulated character data for this element
    self.attrs = None
    self.children = []     # names of child elements seen so far
    self.isValid = 1       # cleared by log(); gates the ValidElement event
    self.name = None
    self.itunes = False
  def setElement(self, name, attrs, parent):
    """Bind this handler to an element, inheriting context from parent.

    Resolves xml:base (validated and joined against the parent's base) and
    returns self for chaining.
    """
    self.name = name
    self.attrs = attrs
    self.parent = parent
    self.dispatcher = parent.dispatcher
    self.xmlLang = parent.xmlLang
    if attrs and attrs.has_key((u'http://www.w3.org/XML/1998/namespace', u'base')):
      self.xmlBase=attrs.getValue((u'http://www.w3.org/XML/1998/namespace', u'base'))
      from validators import rfc3987
      self.validate_attribute((u'http://www.w3.org/XML/1998/namespace',u'base'),
        rfc3987)
      from urlparse import urljoin
      self.xmlBase = urljoin(parent.xmlBase, self.xmlBase)
    else:
      self.xmlBase = parent.xmlBase
    return self
  def validate_attribute(self, name, rule):
    """Run a validator rule (class or instance) against one attribute."""
    if not isinstance(rule,validatorBase): rule = rule()
    rule.setElement(name, {}, self)
    if isinstance(name,str): name = (None,name)
    rule.value=self.attrs.getValue(name)
    rule.validate()
  def getExpectedAttrNames(self):
    """Return attribute names this element allows (None = no filtering)."""
    None
  def unknown_starttag(self, name, qname, attrs):
    """Fallback for namespaced elements with no do_<name> method."""
    from validators import any
    return any(self, name, qname, attrs)
  def startElementNS(self, name, qname, attrs):
    """Dispatch a child element to the matching do_<name> handler.

    Maps known namespaces to prefixed method names (do_prefix_name), logs
    vocabulary/namespace problems, and pushes the chosen handler.
    """
    if attrs.has_key((u'http://www.w3.org/XML/1998/namespace', u'lang')):
      self.xmlLang=attrs.getValue((u'http://www.w3.org/XML/1998/namespace', u'lang'))
    if self.xmlLang:
      from validators import iso639_validate
      iso639_validate(self.log, self.xmlLang, "xml:lang", name)
    from validators import eater
    feedtype=self.getFeedType()
    if (not qname) and feedtype and (feedtype!=TYPE_RSS2):
      # Un-namespaced child in a non-RSS2 feed: vocabulary is ambiguous.
      from logging import UndeterminableVocabulary
      self.log(UndeterminableVocabulary({"parent":self.name, "element":name, "namespace":'""'}))
      qname="null"
    if qname in self.defaultNamespaces: qname=None
    nm_qname = near_miss(qname)
    if nearly_namespaces.has_key(nm_qname):
      # Known namespace: fold the prefix into the method-lookup name.
      prefix = nearly_namespaces[nm_qname]
      qname, name = None, prefix + "_" + name
      if prefix == 'itunes' and not self.itunes and not self.parent.itunes:
        if hasattr(self, 'setItunes'): self.setItunes(True)
    # ensure all attribute namespaces are properly defined
    for (namespace,attr) in attrs.keys():
      if ':' in attr and not namespace:
        from logging import MissingNamespace
        self.log(MissingNamespace({"parent":self.name, "element":attr}))
    # Bytes 0x80-0x9F in attribute values suggest Windows-1252 mojibake.
    for key, string in attrs.items():
      for c in string:
        if 0x80 <= ord(c) <= 0x9F:
          from validators import BadCharacters
          self.log(BadCharacters({"parent":name, "element":key[-1]}))
    if qname:
      # Still-namespaced (unknown namespace): generic handling.
      handler = self.unknown_starttag(name, qname, attrs)
      name="unknown_"+name
    else:
      try:
        self.child=name
        handler = getattr(self, "do_" + name.replace("-","_"))()
      except AttributeError:
        if name.find(':') != -1:
          from logging import MissingNamespace
          self.log(MissingNamespace({"parent":self.name, "element":name}))
          handler = eater()
        elif not qname:
          from logging import UndefinedElement
          self.log(UndefinedElement({"parent": ':'.join(self.name.split("_",1)), "element":name}))
          handler = eater()
        else:
          handler = self.unknown_starttag(name, qname, attrs)
          name="unknown_"+name
    self.push(handler, name, attrs)
    # MAP - always append name, even if already exists (we need this to
    # check for too many hour elements in skipHours, and it doesn't
    # hurt anything else)
    self.children.append(name)
  def normalizeWhitespace(self):
    """Strip leading/trailing whitespace from the accumulated text."""
    self.value = self.value.strip()
  def endElementNS(self, name, qname):
    """Finish the element: normalize text, validate, log success."""
    self.normalizeWhitespace()
    self.validate()
    if self.isValid and self.name:
      from validators import ValidElement
      self.log(ValidElement({"parent":self.parent.name, "element":name}))
  def textOK(self):
    """Hook called when non-whitespace text appears; default: flag it."""
    from validators import UnexpectedText
    self.log(UnexpectedText({"element":self.name,"parent":self.parent.name}))
  def characters(self, string):
    """Accumulate character data, flagging suspicious byte sequences."""
    if string.strip(): self.textOK()
    line=column=0
    pc=' '
    for c in string:
      # latin characters double encoded as utf-8
      if 0x80 <= ord(c) <= 0xBF:
        if 0xC2 <= ord(pc) <= 0xC3:
          # If re-encoding as latin-1 yields valid UTF-8, the text was
          # almost certainly double-encoded.
          try:
            string.encode('iso-8859-1').decode('utf-8')
            from validators import BadCharacters
            self.log(BadCharacters({"parent":self.parent.name, "element":self.name}), offset=(line,max(1,column-1)))
          except:
            pass
      pc = c
      # win1252
      if 0x80 <= ord(c) <= 0x9F:
        from validators import BadCharacters
        self.log(BadCharacters({"parent":self.parent.name, "element":self.name}), offset=(line,column))
      column=column+1
      if ord(c) in (10,13):
        column=0
        line=line+1
    self.value = self.value + string
  def log(self, event, offset=(0,0)):
    """Forward an event to the dispatcher and mark this element invalid."""
    self.dispatcher.log(event, offset)
    self.isValid = 0
  def setFeedType(self, feedType):
    """Record the detected feed type on the dispatcher."""
    self.dispatcher.setFeedType(feedType)
  def getFeedType(self):
    """Return the feed type recorded on the dispatcher."""
    return self.dispatcher.getFeedType()
  def push(self, handler, name, value):
    """Push a child handler onto the dispatcher's stack, with self as parent."""
    self.dispatcher.push(handler, name, value, self)
  def leaf(self):
    """Return a plain-text handler for simple leaf elements."""
    from validators import text
    return text()
  def prevalidate(self):
    """Hook run before child processing; subclasses may override."""
    pass
  def validate(self):
    """Hook run at element end; subclasses may override."""
    pass
| gpl-2.0 |
azunite/gyp | test/rules-dirname/gyptest-dirname.py | 72 | 1475 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
import os
import sys
# Generate and build via an explicit 'all' target; the generated projects are
# relocated first so nothing may depend on absolute source paths.
test = TestGyp.TestGyp(formats=['make', 'ninja', 'android', 'xcode', 'msvs'])

test.run_gyp('actions.gyp', chdir='src')

test.relocate('src', 'relocate/src')

test.build('actions.gyp', chdir='relocate/src')

# Expected stdout of the generated executable(s).
expect = """\
no dir here
hi c
hello baz
"""

# xcode places the built products under the subdir project.
if test.format == 'xcode':
  chdir = 'relocate/src/subdir'
else:
  chdir = 'relocate/src'
test.run_built_executable('gencc_int_output', chdir=chdir, stdout=expect)
if test.format == 'msvs':
  test.run_built_executable('gencc_int_output_external', chdir=chdir,
                            stdout=expect)

# Each .dirname output must contain the dirname of its rule input.
test.must_match('relocate/src/subdir/foo/bar/baz.dirname',
                os.path.join('foo', 'bar'))
test.must_match('relocate/src/subdir/a/b/c.dirname',
                os.path.join('a', 'b'))

# FIXME the xcode and make generators incorrectly convert RULE_INPUT_PATH
# to an absolute path, making the tests below fail!
if test.format != 'xcode' and test.format != 'make':
  test.must_match('relocate/src/subdir/foo/bar/baz.path',
                  os.path.join('foo', 'bar', 'baz.printvars'))
  test.must_match('relocate/src/subdir/a/b/c.path',
                  os.path.join('a', 'b', 'c.printvars'))

test.pass_test()
| bsd-3-clause |
toastwaffle/GData-Backup | utils.py | 1 | 4938 | """Utility functions for backing up contacts and calendars."""
import os
import sys
import gflags
import pathlib
import contacts_pb2
FLAGS = gflags.FLAGS
def resolve_path(path):
    """Ensure that a directory exists and return its absolute path.

    Args:
        path: (str) A path to a directory, optionally relative or including
            the ~ home directory expansion.

    Returns:
        (str) The fully resolved absolute path; the directory (including any
        missing parent directories) is created if it does not exist.

    Raises:
        OSError: If path is nonsensical or cannot be created.
    """
    path_obj = pathlib.Path(os.path.expanduser(path))
    try:
        return str(path_obj.resolve())
    except OSError:
        # BUG FIX: os.mkdir only creates the leaf directory and failed
        # whenever a parent was missing too; makedirs creates the whole tree.
        os.makedirs(str(path_obj))
        return str(path_obj.resolve())
def output(message, level=0):
    """Write *message* to stdout when *level* is within the configured verbosity.

    Args:
        message: (str) Text to emit verbatim; no newline is appended.
        level: (int) Verbosity level; messages with level greater than
            FLAGS.verbosity are suppressed.
    """
    if level > FLAGS.verbosity:
        return
    sys.stdout.write(message)
def contact_to_protobuf(contact, contact_list_pb):
    """Convert a contact from the Google API to our protobuf type.

    Args:
        contact: (object) Contact object returned from the Google API.
        contact_list_pb: (contacts_pb2.ContactList) Contact list to add the
            contact to.
    """
    contact_pb = contact_list_pb.contact.add()

    if contact.name is not None:
        if contact.name.full_name is not None:
            contact_pb.name = contact.name.full_name.text
        else:
            # Assemble a display name from whichever parts are present,
            # in prefix/given/additional/family/suffix order.
            name_parts = []
            for part in (contact.name.name_prefix,
                         contact.name.given_name,
                         contact.name.additional_name,
                         contact.name.family_name,
                         contact.name.name_suffix):
                if part is not None:
                    name_parts.append(part.text)
            contact_pb.name = " ".join(name_parts)
    elif contact.title.text is not None:
        contact_pb.name = contact.title.text

    for email in contact.email:
        email_pb = contact_pb.email.add()
        email_pb.address = email.address
        email_pb.primary = (email.primary == 'true')
        if email.label is not None:
            email_pb.label = email.label
        email_pb.type = _rel_to_type(email.rel)

    for phone in contact.phone_number:
        phone_pb = contact_pb.phone.add()
        phone_pb.number = phone.text
        phone_pb.primary = (phone.primary == 'true')
        if phone.label is not None:
            phone_pb.label = phone.label
        phone_pb.type = _rel_to_type(phone.rel)


def _rel_to_type(rel):
    """Map a GData rel URI to a contacts_pb2.Type enum value.

    The first 33 characters of the rel URI are the schema prefix
    ('http://schemas.google.com/g/2005#'); the remainder names the type.

    Returns:
        (int) The matching Type value, or Type.UNKNOWN when rel is missing,
        not a string, or names an unknown type.  (CONSISTENCY FIX: the email
        and phone paths previously disagreed between contacts_pb2.UNKNOWN and
        contacts_pb2.Type.UNKNOWN; ROBUSTNESS: unknown names used to escape
        as ValueError.)
    """
    try:
        return contacts_pb2.Type.Value(rel[33:].upper())
    except (AttributeError, TypeError, ValueError):
        return contacts_pb2.Type.UNKNOWN
def event_to_protobuf(event, calendar_pb):
    """Convert an event from the Google API to our protobuf type.

    Args:
        event: (dict) Event resource returned from the Google Calendar API.
        calendar_pb: (calendar_pb2.Calendar) Calendar to add the event to.
    """
    event_pb = calendar_pb.event.add()

    _set_optional(event_pb, 'summary', event, 'summary')
    _set_optional(event_pb, 'description', event, 'description')
    _set_optional(event_pb, 'location', event, 'location')

    # All-day events carry 'date'; timed events carry 'dateTime'.  When both
    # are present, 'dateTime' wins (the second assignment overwrites).
    _set_optional(event_pb, 'start', event, 'start', 'date')
    _set_optional(event_pb, 'start', event, 'start', 'dateTime')
    _set_optional(event_pb, 'start_timezone', event, 'start', 'timeZone')

    # BUG FIX: 'endTimeUnspecified' defaults to False and is normally omitted
    # by the API; the old `'endTimeUnspecified' in event and not ...` test
    # therefore skipped the end time of every ordinary event.
    if not event.get('endTimeUnspecified', False):
        _set_optional(event_pb, 'end', event, 'end', 'date')
        _set_optional(event_pb, 'end', event, 'end', 'dateTime')
        _set_optional(event_pb, 'end_timezone', event, 'end', 'timeZone')

    event_pb.use_default_reminder = event['reminders']['useDefault']
    for reminder in event.get('reminders', {}).get('overrides', []):
        event_pb.reminder.add(
            method=reminder['method'],
            minutes=reminder['minutes']
        )


def _set_optional(pb, attr, mapping, *keys):
    """Set pb.<attr> from mapping[k1][k2]... only when every key is present."""
    value = mapping
    try:
        for key in keys:
            value = value[key]
    except KeyError:
        return
    setattr(pb, attr, value)
| mit |
emawind84/readthedocs.org | readthedocs/core/utils/__init__.py | 23 | 3304 | import getpass
import logging
import os
from urlparse import urlparse
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from readthedocs.builds.constants import LATEST
from readthedocs.builds.constants import LATEST_VERBOSE_NAME
from readthedocs.builds.models import Build
log = logging.getLogger(__name__)
SYNC_USER = getattr(settings, 'SYNC_USER', getpass.getuser())
def run_on_app_servers(command):
    """
    Run a single shell command on every app server.

    With MULTIPLE_APP_SERVERS configured, the command is run over ssh (as
    SYNC_USER) on each server and the last non-zero exit status, if any, is
    returned; otherwise the command runs locally and its status is returned.
    (The old docstring claimed this copies a file; it runs an arbitrary
    command.)

    NOTE(review): the command is interpolated into a shell string -- callers
    must pass trusted, shell-safe input.
    """
    log.info("Running %s on app servers" % command)
    ret_val = 0
    if getattr(settings, "MULTIPLE_APP_SERVERS", None):
        for server in settings.MULTIPLE_APP_SERVERS:
            ret = os.system("ssh %s@%s %s" % (SYNC_USER, server, command))
            if ret != 0:
                ret_val = ret
        return ret_val
    else:
        ret = os.system(command)
        return ret
def clean_url(url):
    """Return the host portion of *url*.

    Falls back to the path component when the URL carries neither a scheme
    nor a netloc (e.g. a bare "example.com/page").
    """
    parsed = urlparse(url)
    if parsed.scheme or parsed.netloc:
        return parsed.netloc
    return parsed.path
def cname_to_slug(host):
    """Resolve *host*'s CNAME record and return the first label of the target
    domain (e.g. a CNAME target of "proj.readthedocs.org." yields "proj")."""
    from dns import resolver
    answers = resolver.query(host, 'CNAME')
    target_domain = next(iter(answers)).target.to_unicode()
    return target_domain.split('.')[0]
def trigger_build(project, version=None, record=True, force=False, basic=False):
    """
    An API to wrap the triggering of a build.

    Queues an async ``update_docs`` task for *project* (skipped entirely
    when ``project.skip`` is set).  When *version* is omitted the project's
    LATEST version is used.  With *record*, a Build row in state 'triggered'
    is created first, its pk handed to the task, and the Build returned;
    otherwise returns None.
    """
    # Avoid circular import
    from readthedocs.projects.tasks import update_docs

    if project.skip:
        return None

    if not version:
        version = project.versions.get(slug=LATEST)

    if record:
        # NOTE(review): the row is created with state='triggered' yet
        # success=True -- confirm that combination is intended upstream.
        build = Build.objects.create(
            project=project,
            version=version,
            type='html',
            state='triggered',
            success=True,
        )
        update_docs.delay(pk=project.pk, version_pk=version.pk, record=record,
                          force=force, basic=basic, build_pk=build.pk)
    else:
        build = None
        update_docs.delay(pk=project.pk, version_pk=version.pk, record=record,
                          force=force, basic=basic)

    return build
def send_email(recipient, subject, template, template_html, context=None,
               request=None):
    '''
    Send multipart email

    recipient
        Email recipient address

    subject
        Email subject header

    template
        Plain text template to send

    template_html
        HTML template to send as new message part

    context
        A dictionary to pass into the template calls (may be None)

    request
        Request object for determining absolute URL
    '''
    # BUG FIX: copy into a fresh dict *before* adding 'uri'.  The old code
    # crashed on the default context=None (ctx.update(None) -> TypeError)
    # and mutated the caller's dict by writing context['uri'].
    ctx = {}
    if context:
        ctx.update(context)
    if request:
        scheme = 'https' if request.is_secure() else 'http'
        ctx['uri'] = '{scheme}://{host}'.format(scheme=scheme,
                                                host=request.get_host())
    msg = EmailMultiAlternatives(
        subject,
        get_template(template).render(ctx),
        settings.DEFAULT_FROM_EMAIL,
        [recipient]
    )
    msg.attach_alternative(get_template(template_html).render(ctx), 'text/html')
    msg.send()
| mit |
TheWardoctor/Wardoctors-repo | plugin.video.oculus/resources/lib/plugins/unidecode/x0a4.py | 252 | 4437 | data = (
'qiet', # 0x00
'qiex', # 0x01
'qie', # 0x02
'qiep', # 0x03
'quot', # 0x04
'quox', # 0x05
'quo', # 0x06
'quop', # 0x07
'qot', # 0x08
'qox', # 0x09
'qo', # 0x0a
'qop', # 0x0b
'qut', # 0x0c
'qux', # 0x0d
'qu', # 0x0e
'qup', # 0x0f
'qurx', # 0x10
'qur', # 0x11
'qyt', # 0x12
'qyx', # 0x13
'qy', # 0x14
'qyp', # 0x15
'qyrx', # 0x16
'qyr', # 0x17
'jjit', # 0x18
'jjix', # 0x19
'jji', # 0x1a
'jjip', # 0x1b
'jjiet', # 0x1c
'jjiex', # 0x1d
'jjie', # 0x1e
'jjiep', # 0x1f
'jjuox', # 0x20
'jjuo', # 0x21
'jjuop', # 0x22
'jjot', # 0x23
'jjox', # 0x24
'jjo', # 0x25
'jjop', # 0x26
'jjut', # 0x27
'jjux', # 0x28
'jju', # 0x29
'jjup', # 0x2a
'jjurx', # 0x2b
'jjur', # 0x2c
'jjyt', # 0x2d
'jjyx', # 0x2e
'jjy', # 0x2f
'jjyp', # 0x30
'njit', # 0x31
'njix', # 0x32
'nji', # 0x33
'njip', # 0x34
'njiet', # 0x35
'njiex', # 0x36
'njie', # 0x37
'njiep', # 0x38
'njuox', # 0x39
'njuo', # 0x3a
'njot', # 0x3b
'njox', # 0x3c
'njo', # 0x3d
'njop', # 0x3e
'njux', # 0x3f
'nju', # 0x40
'njup', # 0x41
'njurx', # 0x42
'njur', # 0x43
'njyt', # 0x44
'njyx', # 0x45
'njy', # 0x46
'njyp', # 0x47
'njyrx', # 0x48
'njyr', # 0x49
'nyit', # 0x4a
'nyix', # 0x4b
'nyi', # 0x4c
'nyip', # 0x4d
'nyiet', # 0x4e
'nyiex', # 0x4f
'nyie', # 0x50
'nyiep', # 0x51
'nyuox', # 0x52
'nyuo', # 0x53
'nyuop', # 0x54
'nyot', # 0x55
'nyox', # 0x56
'nyo', # 0x57
'nyop', # 0x58
'nyut', # 0x59
'nyux', # 0x5a
'nyu', # 0x5b
'nyup', # 0x5c
'xit', # 0x5d
'xix', # 0x5e
'xi', # 0x5f
'xip', # 0x60
'xiet', # 0x61
'xiex', # 0x62
'xie', # 0x63
'xiep', # 0x64
'xuox', # 0x65
'xuo', # 0x66
'xot', # 0x67
'xox', # 0x68
'xo', # 0x69
'xop', # 0x6a
'xyt', # 0x6b
'xyx', # 0x6c
'xy', # 0x6d
'xyp', # 0x6e
'xyrx', # 0x6f
'xyr', # 0x70
'yit', # 0x71
'yix', # 0x72
'yi', # 0x73
'yip', # 0x74
'yiet', # 0x75
'yiex', # 0x76
'yie', # 0x77
'yiep', # 0x78
'yuot', # 0x79
'yuox', # 0x7a
'yuo', # 0x7b
'yuop', # 0x7c
'yot', # 0x7d
'yox', # 0x7e
'yo', # 0x7f
'yop', # 0x80
'yut', # 0x81
'yux', # 0x82
'yu', # 0x83
'yup', # 0x84
'yurx', # 0x85
'yur', # 0x86
'yyt', # 0x87
'yyx', # 0x88
'yy', # 0x89
'yyp', # 0x8a
'yyrx', # 0x8b
'yyr', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'Qot', # 0x90
'Li', # 0x91
'Kit', # 0x92
'Nyip', # 0x93
'Cyp', # 0x94
'Ssi', # 0x95
'Ggop', # 0x96
'Gep', # 0x97
'Mi', # 0x98
'Hxit', # 0x99
'Lyr', # 0x9a
'Bbut', # 0x9b
'Mop', # 0x9c
'Yo', # 0x9d
'Put', # 0x9e
'Hxuo', # 0x9f
'Tat', # 0xa0
'Ga', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'Ddur', # 0xa4
'Bur', # 0xa5
'Gguo', # 0xa6
'Nyop', # 0xa7
'Tu', # 0xa8
'Op', # 0xa9
'Jjut', # 0xaa
'Zot', # 0xab
'Pyt', # 0xac
'Hmo', # 0xad
'Yit', # 0xae
'Vur', # 0xaf
'Shy', # 0xb0
'Vep', # 0xb1
'Za', # 0xb2
'Jo', # 0xb3
'[?]', # 0xb4
'Jjy', # 0xb5
'Got', # 0xb6
'Jjie', # 0xb7
'Wo', # 0xb8
'Du', # 0xb9
'Shur', # 0xba
'Lie', # 0xbb
'Cy', # 0xbc
'Cuop', # 0xbd
'Cip', # 0xbe
'Hxop', # 0xbf
'Shat', # 0xc0
'[?]', # 0xc1
'Shop', # 0xc2
'Che', # 0xc3
'Zziet', # 0xc4
'[?]', # 0xc5
'Ke', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| apache-2.0 |
ycaihua/stk-code | tools/nightbuilder/package.py | 20 | 2585 | #!/bin/python
# From Supertuxkart SVN revision $Revision$
# Copyright (C) 2012 Jean-manuel clemencon (samuncle)
# Class used to package the project
################################################################################
from subprocess import check_output
from utils import *
class Package:
    """
    Packages project artifacts into versioned zip archives for distribution.

    Archive names embed the target OS (plus, for single files, the platform
    suffix) and the SCM revision; every archive created is recorded and can
    be listed via getFiles().
    """

    def __init__(self, revision, compressDir, os, platform):
        """
        Constructor of the package class.

        revision:    SCM revision number embedded in every archive name.
        compressDir: directory the zip archives are written to.
        os:          target operating-system name for archive names.
        platform:    platform suffix used by addFile() archive names.
        """
        self.__os = os
        self.__revision = str(revision)
        self.__platform = platform
        self.__compressDir = compressDir
        # BUG FIX: __listFile used to be a *class* attribute, so every
        # Package instance shared (and appended to) one mutable list; it is
        # now a per-instance list.
        self.__listFile = []

    def addFile(self, inFile, outFile, workingDir):
        """
        Pack the single file inFile (relative to workingDir) into
        <outFile>_<os><platform>_<revision>.zip and record the archive name.
        """
        name = "{0}_{1}{2}_{3}.zip".format(
            outFile, self.__os, self.__platform, self.__revision)
        self.__listFile.append(name)
        # NOTE(review): the zip invocation is a shell string built from raw
        # paths -- inputs must be trusted/shell-safe.
        command = "zip " + self.__compressDir + "/" + name + " " + inFile
        # Cdir presumably chdirs on construction and restores the previous
        # cwd on deletion -- behavior kept as-is; confirm in utils.Cdir.
        changeDir = Cdir(workingDir)
        check_output([command], shell=True)
        del changeDir

    def addDir(self, inFile, outFile, workingDir, exclude):
        """
        Recursively pack directory inFile into <outFile>_<os>_<revision>.zip,
        excluding paths matching *exclude*, and record the archive name.

        NOTE(review): unlike addFile, the name carries no platform suffix --
        looks intentional (data dirs are platform-independent) but confirm.
        """
        name = "{0}_{1}_{2}.zip".format(outFile, self.__os, self.__revision)
        self.__listFile.append(name)
        command = ("zip -r --exclude=" + exclude
                   + " " + self.__compressDir + "/" + name
                   + " " + inFile)
        changeDir = Cdir(workingDir)
        check_output([command], shell=True)
        del changeDir

    def getFiles(self):
        """
        Return the list of archive names created by this instance.
        """
        return self.__listFile
| gpl-3.0 |
ArchAssault-Project/archassaultweb | main/migrations/0053_auto__add_field_package_pgp_signature.py | 4 | 11962 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply: add the nullable 'pgp_signature' TextField column to 'packages'."""
    db.add_column('packages', 'pgp_signature', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
    """Revert: drop the 'pgp_signature' column from 'packages'."""
    db.delete_column('packages', 'pgp_signature')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.arch': {
'Meta': {'ordering': "['name']", 'object_name': 'Arch', 'db_table': "'arches'"},
'agnostic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'main.donor': {
'Meta': {'ordering': "['name']", 'object_name': 'Donor', 'db_table': "'donors'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'main.package': {
'Meta': {'ordering': "('pkgname',)", 'object_name': 'Package', 'db_table': "'packages'"},
'arch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Arch']"}),
'build_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'compressed_size': ('main.models.PositiveBigIntegerField', [], {}),
'epoch': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'files_last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'flag_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_size': ('main.models.PositiveBigIntegerField', [], {}),
'last_update': ('django.db.models.fields.DateTimeField', [], {}),
'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'packager_str': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pgp_signature': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgdesc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'pkgname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Repo']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'main.packagedepend': {
'Meta': {'object_name': 'PackageDepend', 'db_table': "'package_depends'"},
'depname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'depvcmp': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'optional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.packagefile': {
'Meta': {'object_name': 'PackageFile', 'db_table': "'package_files'"},
'directory': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_directory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.repo': {
'Meta': {'ordering': "['name']", 'object_name': 'Repo', 'db_table': "'repos'"},
'bugs_category': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'bugs_project': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'staging': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'svn_root': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'main.todolist': {
'Meta': {'object_name': 'Todolist', 'db_table': "'todolists'"},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.todolistpkg': {
'Meta': {'unique_together': "(('list', 'pkg'),)", 'object_name': 'TodolistPkg', 'db_table': "'todolist_pkgs'"},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Todolist']"}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'user_profiles'"},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'allowed_repos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Repo']", 'symmetrical': 'False', 'blank': 'True'}),
'favorite_distros': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'other_contact': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pgp_key': ('devel.fields.PGPKeyField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'default': "'devs/silhouette.png'", 'max_length': '100'}),
'public_email': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'roles': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.CharField', [], {'default': "'UTC'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'yob': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['main']
| gpl-2.0 |
qma/pants | src/python/pants/backend/jvm/tasks/jvmdoc_gen.py | 2 | 8223 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import collections
import contextlib
import multiprocessing
import os
import subprocess
from twitter.common.collections import OrderedSet
from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.base.exceptions import TaskError
from pants.binaries import binary_util
from pants.util.dirutil import safe_mkdir, safe_walk
Jvmdoc = collections.namedtuple('Jvmdoc', ['tool_name', 'product_type'])
class JvmdocGen(JvmTask):
@classmethod
def jvmdoc(cls):
  """Subclasses should return their Jvmdoc configuration.

  Abstract: concrete subclasses must return a Jvmdoc namedtuple
  (tool_name, product_type).
  """
  raise NotImplementedError()
@classmethod
def register_options(cls, register):
  """Register the command-line options for this documentation task; all but
  --open and --skip's siblings are marked fingerprint=True."""
  super(JvmdocGen, cls).register_options(register)
  tool_name = cls.jvmdoc().tool_name
  register('--include-codegen', default=False, action='store_true',
           fingerprint=True,
           help='Create {0} for generated code.'.format(tool_name))
  register('--transitive', default=True, action='store_true',
           fingerprint=True,
           help='Create {0} for the transitive closure of internal targets reachable from the '
                'roots specified on the command line.'.format(tool_name))
  register('--combined', default=False, action='store_true',
           fingerprint=True,
           help='Generate {0} for all targets combined, instead of each target '
                'individually.'.format(tool_name))
  register('--open', default=False, action='store_true',
           help='Open the generated {0} in a browser (implies --combined).'.format(tool_name))
  register('--ignore-failure', default=False, action='store_true',
           fingerprint=True,
           help='Do not consider {0} errors to be build errors.'.format(tool_name))
  # TODO(John Sirois): This supports the JarPublish task and is an abstraction leak.
  # It allows folks doing a local-publish to skip an expensive and un-needed step.
  # Remove this flag and instead support conditional requirements being registered against
  # the round manager. This may require incremental or windowed flag parsing that happens bit by
  # bit as tasks are recursively prepared vs. the current all-at once style.
  register('--skip', default=False, action='store_true',
           fingerprint=True,
           help='Skip {0} generation.'.format(tool_name))
@classmethod
def product_types(cls):
  """Advertise the single product type this task can supply."""
  return [cls.jvmdoc().product_type]
def __init__(self, *args, **kwargs):
  """Capture option values on the instance; note --open implies --combined."""
  super(JvmdocGen, self).__init__(*args, **kwargs)
  options = self.get_options()
  self._include_codegen = options.include_codegen
  self.transitive = options.transitive
  self.open = options.open
  self.combined = self.open or options.combined
  self.ignore_failure = options.ignore_failure
  self.skip = options.skip
def generate_doc(self, language_predicate, create_jvmdoc_command):
  """
  Generate documentation for all doc-able targets that failed invalidation.

  language_predicate: a function that accepts a target and returns True if the target is of that
    language
  create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
    documentation for targets
  """
  if self.skip:
    return

  catalog = self.context.products.isrequired(self.jvmdoc().product_type)
  if catalog and self.combined:
    raise TaskError(
        'Cannot provide {} target mappings for combined output'.format(self.jvmdoc().product_type))

  # A target is doc-able when it matches the language and, unless opted in
  # via --include-codegen, is not generated code.
  def docable(tgt):
    return language_predicate(tgt) and (self._include_codegen or not tgt.is_codegen)

  targets = self.context.targets(predicate=docable)
  if not targets:
    return

  with self.invalidated(targets) as invalidation_check:
    safe_mkdir(self.workdir)

    def find_jvmdoc_targets():
      invalid_targets = set()
      for vt in invalidation_check.invalid_vts:
        invalid_targets.update(vt.targets)

      if self.transitive:
        return invalid_targets
      else:
        # Without --transitive, only regenerate docs for the command-line roots.
        return set(invalid_targets).intersection(set(self.context.target_roots))

    jvmdoc_targets = list(find_jvmdoc_targets())
    if self.combined:
      self._generate_combined(jvmdoc_targets, create_jvmdoc_command)
    else:
      self._generate_individual(jvmdoc_targets, create_jvmdoc_command)

  if catalog:
    # Publish the generated files (paths relative to each target's gendir)
    # as products for downstream tasks.
    for target in targets:
      gendir = self._gendir(target)
      jvmdocs = []
      for root, dirs, files in safe_walk(gendir):
        jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
      self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs)
def _generate_combined(self, targets, create_jvmdoc_command):
  """Generate one merged documentation tree for all targets under
  workdir/combined, optionally opening its index.html (--open)."""
  gendir = os.path.join(self.workdir, 'combined')
  if targets:
    classpath = self.classpath(targets)
    safe_mkdir(gendir, clean=True)
    command = create_jvmdoc_command(classpath, gendir, *targets)
    if command:
      self.context.log.debug("Running create_jvmdoc in {} with {}".format(gendir, " ".join(command)))
      result, gendir = create_jvmdoc(command, gendir)
      self._handle_create_jvmdoc_result(targets, result, command)
  if self.open:
    binary_util.ui_open(os.path.join(gendir, 'index.html'))
def _generate_individual(self, targets, create_jvmdoc_command):
  """Generate a separate documentation tree per target, fanning the tool
  invocations out over a multiprocessing pool (one job per gendir)."""
  jobs = {}
  for target in targets:
    gendir = self._gendir(target)
    classpath = self.classpath([target])
    command = create_jvmdoc_command(classpath, gendir, target)
    if command:
      jobs[gendir] = (target, command)

  if jobs:
    with contextlib.closing(
          multiprocessing.Pool(processes=min(len(jobs), multiprocessing.cpu_count()))) as pool:
      # map would be a preferable api here but fails after the 1st batch with an internal:
      # ...
      # File "...src/python/pants/backend/core/tasks/jar_create.py", line 170, in javadocjar
      # pool.map(createjar, jobs)
      # File "...lib/python2.6/multiprocessing/pool.py", line 148, in map
      # return self.map_async(func, iterable, chunksize).get()
      # File "...lib/python2.6/multiprocessing/pool.py", line 422, in get
      # raise self._value
      # NameError: global name 'self' is not defined
      futures = []
      self.context.log.debug("Begin multiprocessing section; output may be misordered or garbled")
      try:
        for gendir, (target, command) in jobs.items():
          self.context.log.debug("Running create_jvmdoc in {} with {}"
                                 .format(gendir, " ".join(command)))
          futures.append(pool.apply_async(create_jvmdoc, args=(command, gendir)))

        for future in futures:
          result, gendir = future.get()
          target, command = jobs[gendir]
          self._handle_create_jvmdoc_result([target], result, command)
      finally:
        # In the event of an exception, we want to call terminate() because otherwise
        # we get errors on exit when multiprocessing tries to do it, because what
        # is dead may never die.
        pool.terminate()
        self.context.log.debug("End multiprocessing section")
def _handle_create_jvmdoc_result(self, targets, result, command):
  """Raise a TaskError for a non-zero tool exit status (or merely warn when
  --ignore-failure is set)."""
  if result != 0:
    targetlist = ", ".join(map(str, targets))
    message = 'Failed to process {} for {} [{}]: {}'.format(
      self.jvmdoc().tool_name, targetlist, result, command)
    if self.ignore_failure:
      self.context.log.warn(message)
    else:
      raise TaskError(message)
def _gendir(self, target):
  """Per-target output directory under the task workdir."""
  return os.path.join(self.workdir, target.id)
def create_jvmdoc(command, gendir):
  """Run *command* to populate a freshly-cleaned *gendir*.

  Module-level (not a method) so it can be dispatched via the
  multiprocessing pool in _generate_individual.  Returns (exit_status,
  gendir); a failure to spawn the tool (OSError) is reported as status 1.
  """
  try:
    safe_mkdir(gendir, clean=True)
    process = subprocess.Popen(command)
    result = process.wait()
    return result, gendir
  except OSError:
    return 1, gendir
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/scipy/signal/wavelets.py | 67 | 10523 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
    """
    The coefficients for the FIR low-pass filter producing Daubechies wavelets.

    p>=1 gives the order of the zero at f=1/2.
    There are 2p filter coefficients.

    Parameters
    ----------
    p : int
        Order of the zero at f=1/2, can have values from 1 to 34.

    Returns
    -------
    daub : ndarray
        The ``2p`` filter coefficients of the order-`p` Daubechies wavelet,
        normalized so that they sum to ``sqrt(2)``.

    Raises
    ------
    ValueError
        If `p` is less than 1 or 35 and above (the polynomial root-finding
        used here is numerically unreliable for large `p`).
    """
    sqrt = np.sqrt
    if p < 1:
        raise ValueError("p must be at least 1.")
    if p == 1:
        # Haar wavelet: both coefficients equal 1/sqrt(2).
        c = 1 / sqrt(2)
        return np.array([c, c])
    elif p == 2:
        f = sqrt(2) / 8
        c = sqrt(3)
        return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
    elif p == 3:
        # Closed-form construction from the known complex root z1.
        tmp = 12 * sqrt(10)
        z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
        z1c = np.conj(z1)
        f = sqrt(2) / 8
        d0 = np.real((1 - z1) * (1 - z1c))
        a0 = np.real(z1 * z1c)
        a1 = 2 * np.real(z1)
        return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
                                  a0 - 3 * a1 + 3, 3 - a1, 1])
    elif p < 35:
        # General case: construct the Daubechies polynomial and factor it.
        # (A duplicate inner `if p < 35` check with an unreachable alternative
        # "scaled polynomial" branch was removed here -- it was dead code.)
        P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
        yj = np.roots(P)
        # for each root, compute two z roots, select the one with |z|>1
        # Build up final polynomial
        c = np.poly1d([1, 1])**p
        q = np.poly1d([1])
        for k in range(p - 1):
            yval = yj[k]
            part = 2 * sqrt(yval * (yval - 1))
            const = 1 - 2 * yval
            z1 = const + part
            if (abs(z1)) < 1:
                z1 = const - part
            q = q * [1, -z1]
        q = c * np.real(q)
        # Normalize so the coefficients sum to sqrt(2).
        q = q / np.sum(q) * sqrt(2)
        return q.c[::-1]
    else:
        raise ValueError("Polynomial factorization does not work "
                         "well for p too large.")
def qmf(hk):
    """
    Return the high-pass quadrature-mirror filter derived from a
    low-pass filter.

    Parameters
    ----------
    hk : array_like
        Coefficients of the low-pass filter.

    Returns
    -------
    ndarray
        The reversed coefficients with alternating signs:
        ``hk[::-1] * [1, -1, 1, -1, ...]``.
    """
    signs = np.array([(-1) ** k for k in range(len(hk))])
    return hk[::-1] * signs
def cascade(hk, J=7):
    """
    Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.

    Parameters
    ----------
    hk : array_like
        Coefficients of low-pass filter.
    J : int, optional
        Values will be computed at grid points ``K/2**J``. Default is 7.

    Returns
    -------
    x : ndarray
        The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
        ``len(hk) = len(gk) = N+1``.
    phi : ndarray
        The scaling function ``phi(x)`` at `x`:
        ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
    psi : ndarray, optional
        The wavelet function ``psi(x)`` at `x`:
        ``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
        `psi` is only returned if `gk` is not None.

    Notes
    -----
    The algorithm uses the vector cascade algorithm described by Strang and
    Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
    and slices for quick reuse. Then inserts vectors into final vector at the
    end.
    """
    N = len(hk) - 1
    if (J > 30 - np.log2(N + 1)):
        raise ValueError("Too many levels.")
    if (J < 1):
        raise ValueError("Too few levels.")
    # construct matrices needed
    nn, kk = np.ogrid[:N, :N]
    s2 = np.sqrt(2)
    # append a zero so that take works
    thk = np.r_[hk, 0]
    gk = qmf(hk)  # derive the high-pass filter from the low-pass one
    tgk = np.r_[gk, 0]
    # Indices clipped to -1 / N+1 land on the appended zero entry, so
    # out-of-range filter taps contribute nothing to the matrices.
    indx1 = np.clip(2 * nn - kk, -1, N + 1)
    indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
    m = np.zeros((2, 2, N, N), 'd')
    m[0, 0] = np.take(thk, indx1, 0)
    m[0, 1] = np.take(thk, indx2, 0)
    m[1, 0] = np.take(tgk, indx1, 0)
    m[1, 1] = np.take(tgk, indx2, 0)
    m *= s2
    # construct the grid of points
    x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
    phi = 0 * x
    psi = 0 * x
    # find phi0, and phi1: phi at the integers is (the real part of) the
    # eigenvector of m[0, 0] whose eigenvalue is closest to 1
    lam, v = eig(m[0, 0])
    ind = np.argmin(np.absolute(lam - 1))
    # a dictionary with a binary representation of the
    # evaluation points x < 1 -- i.e. position is 0.xxxx
    v = np.real(v[:, ind])
    # need scaling function to integrate to 1 so find
    # eigenvector normalized to sum(v,axis=0)=1
    sm = np.sum(v)
    if sm < 0:  # need scaling function to integrate to 1
        v = -v
        sm = -sm
    bitdic = {'0': v / sm}
    bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
    step = 1 << J
    # Scatter the level-0/1 values into the output grids at stride 2**J.
    phi[::step] = bitdic['0']
    phi[(1 << (J - 1))::step] = bitdic['1']
    psi[::step] = np.dot(m[1, 0], bitdic['0'])
    psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
    # descend down the levels inserting more and more values
    # into bitdic -- store the values in the correct location once we
    # have computed them -- stored in the dictionary
    # for quicker use later.
    prevkeys = ['1']
    for level in range(2, J + 1):
        newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
        fac = 1 << (J - level)
        for key in newkeys:
            # convert key to number: the bit string names the evaluation
            # point as the binary fraction 0.<key>
            num = 0
            for pos in range(level):
                if key[pos] == '1':
                    num += (1 << (level - 1 - pos))
            pastphi = bitdic[key[1:]]
            ii = int(key[0])
            temp = np.dot(m[0, ii], pastphi)
            bitdic[key] = temp
            phi[num * fac::step] = temp
            psi[num * fac::step] = np.dot(m[1, ii], pastphi)
        prevkeys = newkeys
    return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
    """
    Complex Morlet wavelet.

    Parameters
    ----------
    M : int
        Length of the wavelet.
    w : float, optional
        Omega0. Default is 5
    s : float, optional
        Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
    complete : bool, optional
        Whether to use the complete or the standard version.

    Returns
    -------
    morlet : (M,) ndarray

    Notes
    -----
    The standard version is ``pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))``;
    the complete version subtracts the admissibility correction term
    ``exp(-0.5*(w**2))`` from the complex exponential before windowing.
    For `w` greater than 5 the correction is negligible.  The energy of the
    returned wavelet is not normalised according to `s`.

    The fundamental frequency of this wavelet in Hz is ``f = 2*s*w*r / M``
    where `r` is the sampling rate.

    Note: This function was created before `cwt` and is not compatible
    with it.
    """
    grid = linspace(-s * 2 * pi, s * 2 * pi, M)
    wave = exp(1j * w * grid)
    if complete:
        # Correction term improves admissibility at small w.
        wave = wave - exp(-0.5 * (w ** 2))
    return wave * exp(-0.5 * grid ** 2) * pi ** (-0.25)
def ricker(points, a):
    """
    Return a Ricker wavelet, also known as the "Mexican hat wavelet".

    It models the function ``A (1 - x^2/a^2) exp(-x^2/2 a^2)`` with
    ``A = 2/(sqrt(3a) pi^(1/4))``, sampled at `points` unit-spaced
    positions centered around 0.

    Parameters
    ----------
    points : int
        Number of points in the returned vector, centered around 0.
    a : scalar
        Width parameter of the wavelet.

    Returns
    -------
    vector : (N,) ndarray
        Array of length `points` in the shape of a ricker curve.

    Examples
    --------
    >>> from scipy import signal
    >>> len(signal.ricker(100, 4.0))
    100
    """
    amp = 2 / (np.sqrt(3 * a) * (np.pi ** 0.25))
    width_sq = a ** 2
    offsets = np.arange(0, points) - (points - 1.0) / 2
    offsets_sq = offsets ** 2
    envelope = np.exp(-offsets_sq / (2 * width_sq))
    return amp * (1 - offsets_sq / width_sq) * envelope
def cwt(data, wavelet, widths):
    """
    Continuous wavelet transform.

    Convolves `data` with ``wavelet(length, width)`` for every width in
    `widths`, where ``length = min(10 * width, len(data))``.

    Parameters
    ----------
    data : (N,) ndarray
        data on which to perform the transform.
    wavelet : function
        Wavelet function taking ``(number_of_points, width)`` and returning
        a vector of that length (e.g. `ricker`).
    widths : (M,) sequence
        Widths to use for transform.

    Returns
    -------
    cwt : (M, N) ndarray
        Shape ``(len(widths), len(data))``; row ``i`` is the 'same'-mode
        convolution of `data` with the width ``widths[i]`` wavelet.
    """
    transformed = np.zeros([len(widths), len(data)])
    for row, width in enumerate(widths):
        # Cap the wavelet support at the data length.
        support = min(10 * width, len(data))
        transformed[row, :] = convolve(data, wavelet(support, width),
                                       mode='same')
    return transformed
| mit |
Work4Labs/lettuce | tests/integration/lib/Django-1.3/django/contrib/messages/tests/user_messages.py | 241 | 2619 | from django import http
from django.contrib.auth.models import User
from django.contrib.messages.storage.user_messages import UserMessagesStorage,\
LegacyFallbackStorage
from django.contrib.messages.tests.base import skipUnlessAuthIsInstalled
from django.contrib.messages.tests.cookie import set_cookie_data
from django.contrib.messages.tests.fallback import FallbackTest
from django.test import TestCase
class UserMessagesTest(TestCase):
    """Exercise the user-message storage backend directly."""

    def setUp(self):
        self.user = User.objects.create(username='tester')

    def _make_storage(self):
        # Fresh storage bound to a brand-new (anonymous) request.
        return UserMessagesStorage(http.HttpRequest())

    def test_add(self):
        # Adding messages is not supported by the user-message storage.
        self.assertRaises(NotImplementedError,
                          self._make_storage().add, 'Test message 1')

    def test_get_anonymous(self):
        # Ensure that the storage still works if no user is attached to the
        # request.
        self.assertEqual(len(self._make_storage()), 0)

    def test_get(self):
        storage = self._make_storage()
        storage.request.user = self.user
        self.user.message_set.create(message='test message')
        self.assertEqual(len(storage), 1)
        self.assertEqual(list(storage)[0].message, 'test message')

UserMessagesTest = skipUnlessAuthIsInstalled(UserMessagesTest)
class LegacyFallbackTest(FallbackTest, TestCase):
    """Run the fallback-storage suite against the legacy user-message backend."""

    storage_class = LegacyFallbackStorage

    def setUp(self):
        super(LegacyFallbackTest, self).setUp()
        self.user = User.objects.create(username='tester')

    def get_request(self, *args, **kwargs):
        # Attach the test user so the legacy (user-message) storage is usable.
        request = super(LegacyFallbackTest, self).get_request(*args, **kwargs)
        request.user = self.user
        return request

    def test_get_legacy_only(self):
        storage = self.storage_class(self.get_request())
        self.user.message_set.create(message='user message')
        # Test that the message actually contains what we expect.
        self.assertEqual(len(storage), 1)
        self.assertEqual(list(storage)[0].message, 'user message')

    def test_get_legacy(self):
        storage = self.storage_class(self.get_request())
        cookie_storage = self.get_cookie_storage(storage)
        self.user.message_set.create(message='user message')
        set_cookie_data(cookie_storage, ['cookie'])
        # Test that the message actually contains what we expect.
        self.assertEqual(len(storage), 2)
        self.assertEqual(list(storage)[0].message, 'user message')
        self.assertEqual(list(storage)[1], 'cookie')

LegacyFallbackTest = skipUnlessAuthIsInstalled(LegacyFallbackTest)
| gpl-3.0 |
patsissons/Flexget | flexget/plugins/search_rarbg.py | 1 | 4811 | from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.requests import Session, get
from flexget.utils.search import normalize_unicode
# Module-level logger for this plugin.
log = logging.getLogger('rarbg')

# Shared HTTP session; torrentapi.org allows only 1 request per 10 seconds,
# so the per-domain delay is padded slightly past that limit.
requests = Session()
requests.set_domain_delay('torrentapi.org', '10.3 seconds')  # they only allow 1 request per 10 seconds

# Map of human-readable category names to RARBG numeric category ids
# (referenced by both the config schema and the search query).
CATEGORIES = {
    'all': 0,

    # Movies
    'x264 720p': 45,
    'x264 1080p': 44,
    'XviD': 14,
    'Full BD': 42,

    # TV
    'HDTV': 41,
    'SDTV': 18
}
class SearchRarBG(object):
    """
    RarBG search plugin.

    To perform search against single category:

    rarbg:
        category: x264 720p

    To perform search against multiple categories:

    rarbg:
        category:
            - x264 720p
            - x264 1080p

    Movie categories accepted: x264 720p, x264 1080p, XviD, Full BD
    TV categories accepted: HDTV, SDTV

    You can use also use category ID manually if you so desire (eg. x264 720p is actually category id '45')
    """

    # Config schema: a single category (name or numeric id) or a list of
    # them, plus sorting/limiting knobs passed through to the API query.
    schema = {
        'type': 'object',
        'properties': {
            'category': one_or_more({
                'oneOf': [
                    {'type': 'integer'},
                    {'type': 'string', 'enum': list(CATEGORIES)},
                ]}),
            'sorted_by': {'type': 'string', 'enum': ['seeders', 'leechers', 'last'], 'default': 'last'},
            # min_seeders and min_leechers seem to be working again
            'min_seeders': {'type': 'integer', 'default': 0},
            'min_leechers': {'type': 'integer', 'default': 0},
            'limit': {'type': 'integer', 'enum': [25, 50, 100], 'default': 25},
            'ranked': {'type': 'boolean', 'default': True},
            'use_tvdb': {'type': 'boolean', 'default': False},
        },
        "additionalProperties": False
    }

    base_url = 'https://torrentapi.org/pubapi.php'

    def get_token(self):
        """Fetch a fresh API token from torrentapi.org.

        Returns the token string, or None if the response could not be
        parsed as JSON.
        """
        # Don't use a session as tokens are not affected by domain limit
        r = get('https://torrentapi.org/pubapi.php', params={'get_token': 'get_token', 'format': 'json'})
        token = None
        try:
            token = r.json().get('token')
        except ValueError:
            log.error('Could not retrieve RARBG token.')
        log.debug('RarBG token: %s' % token)
        return token

    @plugin.internet(log)
    def search(self, task, entry, config):
        """
        Search for entries on RarBG.

        Returns a set of Entry objects, each carrying the torrent 'title'
        and download 'url' reported by the API.  Returns an empty set when
        no API token could be obtained.
        """
        categories = config.get('category', 'all')
        # Ensure categories a list
        if not isinstance(categories, list):
            categories = [categories]
        # Convert named category to its respective category id number
        categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
        category_url_fragment = ';'.join(str(c) for c in categories)

        entries = set()

        token = self.get_token()
        if not token:
            log.error('No token set. Exiting RARBG search.')
            return entries

        # Base query parameters shared by every search string.
        params = {'mode': 'search', 'token': token, 'ranked': int(config['ranked']),
                  'min_seeders': config['min_seeders'], 'min_leechers': config['min_leechers'],
                  'sort': config['sorted_by'], 'category': category_url_fragment, 'format': 'json'}

        for search_string in entry.get('search_strings', [entry['title']]):
            # Clear per-iteration parameters left over from the previous
            # search string.
            params.pop('search_string', None)
            params.pop('search_imdb', None)

            if entry.get('movie_name'):
                # Movies are looked up by IMDB id instead of free text.
                params['search_imdb'] = entry.get('imdb_id')
            else:
                query = normalize_unicode(search_string)
                query_url_fragment = query.encode('utf8')
                params['search_string'] = query_url_fragment
                if config['use_tvdb']:
                    plugin.get_plugin_by_name('thetvdb_lookup').instance.lazy_series_lookup(entry)
                    params['search_tvdb'] = entry.get('tvdb_id')
                    log.debug('Using tvdb id %s' % entry.get('tvdb_id'))

            page = requests.get(self.base_url, params=params)
            log.debug('requesting: %s' % page.url)
            try:
                r = page.json()
            except ValueError:
                # Non-JSON response (e.g. error page); skip this search term.
                log.debug(page.text)
                continue
            for result in r:
                e = Entry()
                # The pubapi response uses 'f' for the title and 'd' for the
                # download URL.
                e['title'] = result.get('f')
                e['url'] = result.get('d')

                entries.add(e)

        return entries
@event('plugin.register')
def register_plugin():
    """Register the rarbg search plugin with FlexGet (plugin API version 2)."""
    plugin.register(SearchRarBG, 'rarbg', groups=['search'], api_ver=2)
| mit |
ilsindia/php-cf-buildtest | lib/build_pack_utils/runner.py | 42 | 6178 | import os
import os.path
import sys
import tempfile
import subprocess
import logging
# This and check_output are shims to support features of Python 2.7
# on Python 2.6.
#
# This code was borrowed from PyPy 2.7.
# bitbucket.org/pypy/pypy/src/9d88b4875d6e/lib-python/2.7/subprocess.py
#
# This can be removed when the CloudFoundry environment is upgraded
# to Python 2.7 or higher.
#
class CalledProcessError(Exception):
    """Raised when a process run by check_call() or check_output()
    exits with a non-zero status.

    Attributes:
        returncode: the process exit status.
        cmd: the command that was run.
        output: output captured by check_output(), otherwise None.
    """

    def __init__(self, returncode, cmd, output=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        details = (self.cmd, self.returncode)
        return "Command '%s' returned non-zero exit status %d" % details
def check_output(*popenargs, **kwargs):
    r"""Run a command and return its standard output as a byte string.

    The arguments are the same as for the Popen constructor.  On a
    non-zero exit status a CalledProcessError is raised, carrying the
    return code in its ``returncode`` attribute and the captured output
    in its ``output`` attribute.

    Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    captured, _ = proc.communicate()
    status = proc.poll()
    if not status:
        return captured
    failing_cmd = kwargs.get("args")
    if failing_cmd is None:
        failing_cmd = popenargs[0]
    raise CalledProcessError(status, failing_cmd, output=captured)
def stream_output(*popenargs, **kwargs):
    r"""Run a command and stream its output to a file-like destination.

    The first positional argument is the destination object; the remaining
    arguments are the same as for the Popen constructor.  If the
    destination exposes a ``fileno`` attribute the child process writes to
    it directly; otherwise stdout is captured through a pipe and copied to
    the destination in 1 KiB chunks via its ``write`` method.

    Raises CalledProcessError (return code in ``returncode``) if the
    command exits non-zero.

    Example:

    >>> fp = open('cmd-output.txt', 'wb')
    >>> stream_output(fp, ["ls", "-l", "/dev/null"])

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    if hasattr(popenargs[0], 'fileno'):
        # Real file descriptor: let the child write to it directly.
        process = subprocess.Popen(stdout=popenargs[0],
                                   *popenargs[1:], **kwargs)
        retcode = process.wait()
    else:
        process = subprocess.Popen(stdout=subprocess.PIPE,
                                   *popenargs[1:], **kwargs)
        # The EOF sentinel must be b'': the pipe yields bytes on Python 3,
        # so the previous '' sentinel never matched and looped forever.
        # (On Python 2, '' == b'', so this is also backward-compatible.)
        for chunk in iter(lambda: process.stdout.read(1024), b''):
            popenargs[0].write(chunk)
        # wait() instead of poll(): at pipe EOF the child may not have been
        # reaped yet, and poll() could return None, masking a failure.
        retcode = process.wait()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            # popenargs[0] is the output stream; the command is the next
            # positional argument (the original mistakenly reported the
            # stream object here).
            cmd = popenargs[1]
        raise CalledProcessError(retcode, cmd)
class BuildPack(object):
    """Clone a Cloud Foundry buildpack and drive its lifecycle scripts
    (``bin/detect``, ``bin/compile``, ``bin/release``) against a build
    context.

    ``ctx`` must provide 'BUILD_DIR' and 'CACHE_DIR' paths.  Script output
    is streamed to ``stream`` (stdout by default).
    """

    def __init__(self, ctx, url, branch=None, stream=sys.stdout):
        self._ctx = ctx
        self._url = url
        self._branch = branch
        self._stream = stream
        # The buildpack is cloned into a fresh temporary directory.
        self.bp_dir = tempfile.mkdtemp(prefix='buildpack')
        self._log = logging.getLogger('runner')

    def run(self):
        """Execute the full lifecycle: clone (when a URL was given), then
        detect, compile and release.  Stores the detected framework name in
        ``self.framework`` and the release YAML in ``self.start_yml``."""
        if self._url:
            self._clone()
        self.framework = self._detect()
        self._compile()
        self.start_yml = self._release()

    def _clone(self):
        # Clone the buildpack repo into bp_dir, optionally checking out a
        # branch/ref afterwards.
        # NOTE(review): the checkout runs in the current working directory,
        # not bp_dir -- confirm callers chdir into bp_dir first.
        self._log.debug("Cloning [%s] to [%s]", self._url, self.bp_dir)
        stream_output(self._stream,
                      " ".join(['git', 'clone', self._url, self.bp_dir]),
                      stderr=subprocess.STDOUT,
                      shell=True)
        if self._branch:
            self._log.debug("Branching to [%s]", self._branch)
            stream_output(self._stream,
                          " ".join(['git', 'checkout', self._branch]),
                          stderr=subprocess.STDOUT,
                          shell=True)

    def _detect(self):
        # Run bin/detect against BUILD_DIR; returns its stdout (the
        # detected framework name), stripped.
        self._log.debug("Running detect script")
        cmd = [os.path.join(self.bp_dir, 'bin', 'detect'),
               self._ctx['BUILD_DIR']]
        return check_output(" ".join(cmd),
                            stderr=subprocess.STDOUT,
                            shell=True).strip()

    def _compile(self):
        # Run bin/compile with the build and cache directories; output is
        # streamed to self._stream rather than captured.
        self._log.debug("Running compile script with build dir [%s] "
                        "and cache dir [%s]",
                        self._ctx['BUILD_DIR'],
                        self._ctx['CACHE_DIR'])
        cmd = [os.path.join(self.bp_dir, 'bin', 'compile'),
               self._ctx['BUILD_DIR'],
               self._ctx['CACHE_DIR']]
        stream_output(self._stream,
                      " ".join(cmd),
                      stderr=subprocess.STDOUT,
                      shell=True)

    def _release(self):
        # Run bin/release against BUILD_DIR; returns its stdout (the
        # start-command YAML), stripped.
        self._log.debug("Running release script")
        cmd = [os.path.join(self.bp_dir, 'bin', 'release'),
               self._ctx['BUILD_DIR']]
        return check_output(" ".join(cmd),
                            stderr=subprocess.STDOUT,
                            shell=True).strip()
| apache-2.0 |
HeinleinSupport/check_mk | python-setuptools/lib/python/setuptools/py36compat.py | 313 | 2891 | import sys
from distutils.errors import DistutilsOptionError
from distutils.util import strtobool
from distutils.debug import DEBUG
class Distribution_parse_config_files:
    """
    Mix-in providing forward-compatibility for functionality to be
    included by default on Python 3.7.

    Do not edit the code in this class except to update functionality
    as implemented in distutils.
    """
    # NOTE(review): this mirrors distutils.dist.Distribution.parse_config_files;
    # keep it byte-for-byte in sync with upstream rather than refactoring.
    def parse_config_files(self, filenames=None):
        from configparser import ConfigParser

        # Ignore install directory options if we have a venv
        if sys.prefix != sys.base_prefix:
            ignore_options = [
                'install-base', 'install-platbase', 'install-lib',
                'install-platlib', 'install-purelib', 'install-headers',
                'install-scripts', 'install-data', 'prefix', 'exec-prefix',
                'home', 'user', 'root']
        else:
            ignore_options = []

        ignore_options = frozenset(ignore_options)

        if filenames is None:
            filenames = self.find_config_files()

        if DEBUG:
            self.announce("Distribution.parse_config_files():")

        parser = ConfigParser(interpolation=None)
        for filename in filenames:
            if DEBUG:
                self.announce(" reading %s" % filename)
            parser.read(filename)
            for section in parser.sections():
                options = parser.options(section)
                opt_dict = self.get_option_dict(section)

                for opt in options:
                    if opt != '__name__' and opt not in ignore_options:
                        val = parser.get(section,opt)
                        opt = opt.replace('-', '_')
                        opt_dict[opt] = (filename, val)

            # Make the ConfigParser forget everything (so we retain
            # the original filenames that options come from)
            parser.__init__()

        # If there was a "global" section in the config file, use it
        # to set Distribution options.
        if 'global' in self.command_options:
            for (opt, (src, val)) in self.command_options['global'].items():
                alias = self.negative_opt.get(opt)
                try:
                    if alias:
                        setattr(self, alias, not strtobool(val))
                    elif opt in ('verbose', 'dry_run'): # ugh!
                        setattr(self, opt, strtobool(val))
                    else:
                        setattr(self, opt, val)
                except ValueError as msg:
                    raise DistutilsOptionError(msg)
# Replace the mix-in with an empty no-op class when it is not needed:
# on Python 2 the stock distutils behavior suffices.
if sys.version_info < (3,):
    # Python 2 behavior is sufficient
    class Distribution_parse_config_files:
        pass

# Placeholder kill-switch: flip to True once the updated behavior ships
# upstream in distutils, which disables this override entirely.
if False:
    # When updated behavior is available upstream,
    # disable override here.
    class Distribution_parse_config_files:
        pass
| gpl-2.0 |
calebfoss/tensorflow | tensorflow/contrib/ndlstm/python/__init__.py | 135 | 1103 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init file, giving convenient access to all ndlstm ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,g-importing-member
from tensorflow.contrib.ndlstm.python.lstm1d import *
from tensorflow.contrib.ndlstm.python.lstm2d import *
from tensorflow.contrib.ndlstm.python.misc import *
# pylint: enable=wildcard-import
| apache-2.0 |
bxlab/bx-python | lib/bx_extras/pyparsing.py | 1 | 137392 | # module pyparsing.py
#
# Copyright (c) 2003-2008 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!")::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.0"
__versionTime__ = "28 May 2008 10:05"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import copy
import re
import string
import sys
import warnings
import xml.sax.saxutils
from weakref import ref as wkref
import sre_constants
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
_MAX_INT = sys.maxsize  # Python 3 replacement for the removed py2 sys.maxint
def _str2dict(strg):
    """Map every character of *strg* to 0 -- a cheap membership 'set'."""
    return dict.fromkeys(strg, 0)
class _Constants:
    # Empty placeholder class, used as a simple namespace for constants.
    pass
# Basic character-class strings exported for building grammar expressions.
alphas = string.ascii_lowercase + string.ascii_uppercase
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = "\\"
# Every printable ASCII character except whitespace.
printables = "".join([c for c in string.printable if c not in string.whitespace])
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    __slots__ = ("loc", "msg", "pstr", "parserElement")
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible

    def __init__(self, pstr, loc=0, msg=None, elem=None):
        # When called with only a string, that string is the message and
        # the input string is recorded as empty.
        self.loc = loc
        if msg is None:
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem

    def __getattr__(self, aname):
        """supported attributes by name are:
        - lineno - returns the line number of the exception text
        - col - returns the column number of the exception text
        - line - returns the line containing the exception text
        """
        # lineno/col/line are computed lazily from the module-level helper
        # functions (defined elsewhere in this module) instead of stored,
        # keeping construction cheap.
        if(aname == "lineno"):
            return lineno(self.loc, self.pstr)
        elif(aname in ("col", "column")):
            return col(self.loc, self.pstr)
        elif(aname == "line"):
            return line(self.loc, self.pstr)
        else:
            raise AttributeError(aname)

    def __str__(self):
        return "%s (at char %d), (line:%d, col:%d)" % (self.msg, self.loc, self.lineno, self.column)

    def __repr__(self):
        return str(self)

    def markInputline(self, markerString=">!<"):
        """Extracts the exception line from the input string, and marks
        the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            # Splice the marker into the offending line at the error column.
            line_str = "".join([line_str[:line_column],
                                markerString, line_str[line_column:]])
        return line_str.strip()
class ParseException(ParseBaseException):
    """exception thrown when parse expressions don't match class;
    supported attributes by name are:
    - lineno - returns the line number of the exception text
    - col - returns the column number of the exception text
    - line - returns the line containing the exception text
    """
    # Behavior is inherited entirely from ParseBaseException.
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
    is found; stops all parsing immediately"""
    # Per the docstring: unlike ParseException, this halts all parsing.
class ParseSyntaxException(ParseFatalException):
    """just like ParseFatalException, but thrown internally when an
    ErrorStop indicates that parsing is to stop immediately because
    an unbacktrackable syntax error has been found"""

    def __init__(self, pe):
        # Copy location/message/element details from the wrapped exception.
        super().__init__(pe.pstr, pe.loc, pe.msg, pe.parserElement)
class RecursiveGrammarException(Exception):
    """Raised by validate() when a grammar is found to be improperly
    recursive."""

    def __init__(self, parseElementList):
        # Keep the chain of elements demonstrating the recursion cycle.
        self.parseElementTrace = parseElementList

    def __str__(self):
        return "RecursiveGrammarException: %s" % (self.parseElementTrace,)
class _ParseResultsWithOffset:
    """Lightweight (results, offset) pair supporting indexing and repr."""

    def __init__(self, p1, p2):
        self.tup = p1, p2

    def __getitem__(self, i):
        return self.tup[i]

    def __repr__(self):
        return repr(self.tup)
class ParseResults:
    """Structured parse results, to provide multiple means of access to the parsed data:
    - as a list (len(results))
    - by list index (results[0], results[1], etc.)
    - by attribute (results.<resultsName>)
    """
    __slots__ = ("__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__")

    def __new__(cls, toklist, name=None, asList=True, modal=True):
        # Passing an existing ParseResults through returns it unchanged;
        # __doinit makes __init__ perform its one-time setup only for
        # genuinely new instances.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__(self, toklist, name=None, asList=True, modal=True):
        """Build results from a token list, optionally filing them under a results name.

        toklist -- matched tokens (a list is copied; any other value becomes a 1-item list)
        name    -- results name for the tokens (int names are coerced to str)
        asList  -- if True, a named token group is stored as a nested ParseResults
        modal   -- if True, a repeated name reports only the last match;
                   if False, matches accumulate under the name
        """
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        # this line is related to debugging the asXML bug
        # ~ asList = False
        if name:
            if not modal:
                self.__accumNames[name] = 0
            if isinstance(name, int):
                name = str(name)
            self.__name = name
            if toklist not in (None, '', []):
                if isinstance(toklist, str):
                    toklist = [toklist]
                if asList:
                    if isinstance(toklist, ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(), -1)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), -1)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError, TypeError):
                        self[name] = toklist

    def __getitem__(self, i):
        """Index access: ints/slices read the token list, strings read named results."""
        if isinstance(i, (int, slice)):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([v[0] for v in self.__tokdict[i]])

    def __setitem__(self, k, v):
        if isinstance(v, _ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
            sub = v[0]
        elif isinstance(k, int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)]
            sub = v
        # Nested results keep a weak backlink so getName() can ask the parent.
        if isinstance(sub, ParseResults):
            sub.__parent = wkref(self)

    def __delitem__(self, i):
        if isinstance(i, (int, slice)):
            mylen = len(self.__toklist)
            del self.__toklist[i]
            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__(self, k):
        return k in self.__tokdict

    def __len__(self):
        return len(self.__toklist)

    def __bool__(self):
        return len(self.__toklist) > 0
    __nonzero__ = __bool__

    def __iter__(self):
        return iter(self.__toklist)

    def __reversed__(self):
        return iter(reversed(self.__toklist))

    def keys(self):
        """Returns all named result keys."""
        return self.__tokdict.keys()

    def pop(self, index=-1):
        """Removes and returns item at specified index (default=last).
        Will work with either numeric indices or dict-key indices."""
        ret = self[index]
        del self[index]
        return ret

    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
        such name, then returns the given defaultValue or None if no
        defaultValue is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue

    def items(self):
        """Returns all named result keys and values as a list of tuples."""
        return [(k, self[k]) for k in self.__tokdict]

    def values(self):
        """Returns all named result values."""
        return [v[-1][0] for v in self.__tokdict.values()]

    def __getattr__(self, name):
        # Fallback attribute access: unknown names resolve to named results,
        # or "" when no such result exists (so results.foo never raises).
        if name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([v[0] for v in self.__tokdict[name]])
            else:
                return ""
        return None

    def __add__(self, other):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__(self, other):
        if other.__tokdict:
            offset = len(self.__toklist)
            # Shift the other result's offsets past our own tokens;
            # negative (marker) offsets are pinned to the insertion point.
            addoffset = (lambda a: (a < 0 and offset) or (a+offset))
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
                              for (k, vlist) in otheritems for v in vlist]
            for k, v in otherdictitems:
                self[k] = v
                if isinstance(v[0], ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update(other.__accumNames)
        del other
        return self

    def __repr__(self):
        return "({}, {})".format(repr(self.__toklist), repr(self.__tokdict))

    def __str__(self):
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + str(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out

    def _asStringList(self, sep=''):
        """Flattens the results into a list of strings (helper for str-joining)."""
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance(item, ParseResults):
                out += item._asStringList()
            else:
                out.append(str(item))
        return out

    def asList(self):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res, ParseResults):
                out.append(res.asList())
            else:
                out.append(res)
        return out

    def asDict(self):
        """Returns the named parse results as dictionary."""
        return dict(self.items())

    def copy(self):
        """Returns a new copy of a ParseResults object."""
        ret = ParseResults(self.__toklist)
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update(self.__accumNames)
        ret.__name = self.__name
        return ret

    def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # Map token-list offsets back to their results names.
        namedItems = {v[1]: k for (k, vlist) in self.__tokdict.items() for v in vlist}
        nextLevelIndent = indent + "  "
        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name
        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"
        out += [nl, indent, "<", selfTag, ">"]
        worklist = self.__toklist
        for i, res in enumerate(worklist):
            if isinstance(res, ParseResults):
                if i in namedItems:
                    out += [res.asXML(
                        namedItems[i],
                        namedItemsOnly and doctag is None,
                        nextLevelIndent,
                        formatted)]
                else:
                    out += [res.asXML(
                        None,
                        namedItemsOnly and doctag is None,
                        nextLevelIndent,
                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = xml.sax.saxutils.escape(str(res))
                out += [nl, nextLevelIndent, "<", resTag, ">", xmlBodyText, "</", resTag, ">"]
        out += [nl, indent, "</", selfTag, ">"]
        return "".join(out)

    def __lookup(self, sub):
        # Reverse lookup: find the results name under which `sub` is filed.
        for k, vlist in self.__tokdict.items():
            for v, _loc in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1
              and len(self.__tokdict) == 1
              # Fix for Python 3: dict views are not subscriptable, so the
              # old .values()[0] / .keys()[0] raised TypeError here.
              and next(iter(self.__tokdict.values()))[0][1] in (0, -1)):
            return next(iter(self.__tokdict))
        else:
            return None

    def dump(self, indent='', depth=0):
        """Diagnostic method for listing out the contents of a ParseResults.
        Accepts an optional indent argument so that this string can be embedded
        in a nested display of other data."""
        out = []
        out.append(indent+str(self.asList()))
        keys = sorted(self.items())
        for k, v in keys:
            if out:
                out.append('\n')
            out.append("{}{}- {}: ".format(indent, ('  '*depth), k))
            if isinstance(v, ParseResults):
                if v.keys():
                    out.append(v.dump(indent, depth+1))
                else:
                    out.append(str(v))
            else:
                out.append(str(v))
        return "".join(out)

    # add support for pickle protocol
    def __getstate__(self):
        # The weak parent ref is dereferenced for pickling; __setstate__
        # re-wraps it in a new weakref.
        return (self.__toklist, (
            self.__tokdict.copy(),
            self.__parent is not None and self.__parent() or None,
            self.__accumNames,
            self.__name))

    def __setstate__(self, state):
        self.__toklist = state[0]
        self.__tokdict, par, inAccumNames, self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None
def col(loc, strg):
    """Returns current column within a string, counting newlines as line separators.
    The first column is number 1.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See
    L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
    on parsing strings containing <TAB>s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line and column
    positions within the parsed string.
    """
    # A location sitting on a newline is reported as column 1 of the next line.
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc, strg):
    """Returns current line number within a string, counting newlines as line separators.
    The first line is number 1.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See
    L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
    on parsing strings containing <TAB>s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line and column
    positions within the parsed string.
    """
    # One more than the number of newlines that precede loc.
    return 1 + strg.count("\n", 0, loc)
def line(loc, strg):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
    """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    # end == -1 (no further newline) and end == 0 both fall through to
    # "take the rest of the string", matching the original nextCR > 0 test.
    return strg[start:end] if end > 0 else strg[start:]
def _defaultStartDebugAction(instring, loc, expr):
    """Default debug hook invoked when an expression is about to be matched."""
    print(f"Match {expr} at loc {loc}({lineno(loc, instring)},{col(loc, instring)})")
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
print("Matched " + str(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction(instring, loc, expr, exc):
print("Exception raised:" + str(exc))
def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    return None
class ParserElement:
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
def setDefaultWhitespaceChars(chars):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def __init__(self, savelist=False):
self.parseAction = list()
self.failAction = None
# ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = (None, None, None) # custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy(self):
"""Make a copy of this ParserElement. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName(self, name):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self, "exception"):
self.exception.msg = self.errmsg
return self
def setResultsName(self, name, listAllMatches=False):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original ParserElement object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self, breakFlag=True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set breakFlag to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
_parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse, "_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def _normalizeParseActionArgs(f):
"""Internal method used to decorate parse actions that take fewer than 3 arguments,
so that all parse actions can be called as f(s,l,t)."""
STAR_ARGS = 4
try:
restore = None
if isinstance(f, type):
restore = f
f = f.__init__
codeObj = f.code
if codeObj.co_flags & STAR_ARGS:
return f
numargs = codeObj.co_argcount
if hasattr(f, "__self__"):
numargs -= 1
if restore:
f = restore
except AttributeError:
try:
call_im_func_code = f.__code__
# not a function, must be a callable object, get info from the
# im_func binding of its bound __call__ method
if call_im_func_code.co_flags & STAR_ARGS:
return f
numargs = call_im_func_code.co_argcount
if hasattr(f.__call__, "__self__"):
numargs -= 0
except AttributeError:
call_func_code = f.__call__.__code__
# not a bound method, get info directly from __call__ method
if call_func_code.co_flags & STAR_ARGS:
return f
numargs = call_func_code.co_argcount
if hasattr(f.__call__, "__self__"):
numargs -= 1
# ~ print ("adding function %s with %d args" % (f.func_name,numargs))
if numargs == 3:
return f
else:
if numargs > 3:
def tmp(s, l, t):
return f(f.__call__.__self__, s, l, t)
elif numargs == 2:
def tmp(s, l, t):
return f(l, t)
elif numargs == 1:
def tmp(s, l, t):
return f(t)
else: # ~ numargs == 0:
def tmp(s, l, t):
return f()
try:
tmp.__name__ = f.__name__
except (AttributeError, TypeError):
# no need for special handling if attribute doesnt exist
pass
try:
tmp.__doc__ = f.__doc__
except (AttributeError, TypeError):
# no need for special handling if attribute doesnt exist
pass
try:
tmp.__dict__.update(f.__dict__)
except (AttributeError, TypeError):
# no need for special handling if attribute doesnt exist
pass
return tmp
_normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction(self, *fns, **kwargs):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks),
fn(loc,toks), fn(toks), or just fn(), where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction(self, *fns, **kwargs):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(self._normalizeParseActionArgs, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction(self, fn):
"""Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
fn(s,loc,expr,err) where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw ParseFatalException
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while True:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return loc, []
def postParse(self, instring, loc, tokenlist):
return tokenlist
# ~ @profile
def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
debugging = (self.debug) # and doActions )
if debugging or self.failAction:
# ~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if self.debugActions[0]:
self.debugActions[0](instring, loc, self)
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = loc
try:
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
except ParseBaseException as err:
# ~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
if self.failAction:
self.failAction(instring, tokensStart, self, err)
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = loc
if self.mayIndexError or loc >= len(instring):
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, preloc, doActions)
tokens = self.postParse(instring, loc, tokens)
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults)
except ParseBaseException as err:
# ~ print "Exception raised in user parse action:", err
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
raise
else:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults)
if debugging:
# ~ print ("Matched",self,"->",retTokens.asList())
if self.debugActions[1]:
self.debugActions[1](instring, tokensStart, loc, self, retTokens)
return loc, retTokens
def tryParse(self, instring, loc):
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
lookup = (self, instring, loc, callPreParse, doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[lookup]
if isinstance(value, Exception):
raise value
return value
else:
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
ParserElement._exprArgCache[lookup] = (value[0], value[1].copy())
return value
except ParseBaseException as pe:
ParserElement._exprArgCache[lookup] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method ParserElement.enablePackrat(). If
your program uses psyco to "compile as you go", you must call
enablePackrat before calling psyco.full(). If you do not do this,
Python will crash. For best results, call enablePackrat() immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
def parseString(self, instring, parseAll=False):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set parseAll to True (equivalent to ending
the grammar with StringEnd()).
Note: parseString implicitly calls expandtabs() on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the loc argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling parseWithTabs on your grammar before calling parseString
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full (s,loc,toks) signature, and
reference the input string using the parse action's s argument
- explictly expand the tabs in your input string before calling
parseString
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
# ~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
loc, tokens = self._parse(instring, 0)
if parseAll:
StringEnd()._parse(instring, loc)
return tokens
def scanString(self, instring, maxMatches=_MAX_INT):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
maxMatches argument, to clip scanning after 'n' matches are found.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = str(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc+1
else:
matches += 1
yield tokens, preloc, nextLoc
loc = nextLoc
def transformString(self, instring):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t, s, e in self.scanString(instring):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.asList()
elif isinstance(t, list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(str, out))
def searchString(self, instring, maxMatches=_MAX_INT):
"""Another extension to scanString, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
maxMatches argument, to clip searching after 'n' matches are found.
"""
return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])
def __add__(self, other):
"""Implementation of + operator - returns And"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return And([self, other])
def __radd__(self, other):
"""Implementation of + operator when left operand is not a ParserElement"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns And with error stop"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return And([self, And._ErrorStop(), other])
def __rsub__(self, other):
"""Implementation of - operator when left operand is not a ParserElement"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return other - self
def __mul__(self, other):
if isinstance(other, int):
minElements, optElements = other, 0
elif isinstance(other, tuple):
if len(other) == 0:
other = (None, None)
elif len(other) == 1:
other = (other[0], None)
if len(other) == 2:
if other[0] is None:
other = (0, other[1])
if isinstance(other[0], int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0], int) and isinstance(other[1], int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]), type(other[1]))
else:
raise TypeError("can only multiply 'ParserElement' and int or (int,int) objects")
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if optElements:
def makeOptionalList(n):
if n > 1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
"""Implementation of | operator - returns MatchFirst"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return MatchFirst([self, other])
def __ror__(self, other):
"""Implementation of | operator when left operand is not a ParserElement"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return other | self
def __xor__(self, other):
"""Implementation of ^ operator - returns Or"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return Or([self, other])
def __rxor__(self, other):
"""Implementation of ^ operator when left operand is not a ParserElement"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return other ^ self
def __and__(self, other):
"""Implementation of & operator - returns Each"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return Each([self, other])
def __rand__(self, other):
"""Implementation of & operator when left operand is not a ParserElement"""
if isinstance(other, str):
other = Literal(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2)
return None
return other & self
def __invert__(self):
"""Implementation of ~ operator - returns NotAny"""
return NotAny(self)
def __call__(self, name):
"""Shortcut for setResultsName, with listAllMatches=default::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
return self.setResultsName(name)
def suppress(self):
"""Suppresses the output of this ParserElement; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress(self)
def leaveWhitespace(self):
"""Disables the skipping of whitespace before matching the characters in the
ParserElement's defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars(self, chars):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs(self):
"""Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
Must be called before parseString when the input grammar contains elements that
match <TAB> characters."""
self.keepTabs = True
return self
def ignore(self, other):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append(Suppress(other))
return self
def setDebugActions(self, startAction, successAction, exceptionAction):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug(self, flag=True):
"""Enable display of debugging messages while doing pattern matching.
Set flag to True to enable, False to disable."""
if flag:
self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
else:
self.debug = False
return self
def __str__(self):
return self.name
def __repr__(self):
return str(self)
def streamline(self):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion(self, parseElementList):
pass
def validate(self, validateTrace=None):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion([])
def parseFile(self, file_or_filename):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
return self.parseString(file_contents)
def getException(self):
return ParseException("", 0, self.errmsg, self)
def __getattr__(self, aname):
if aname == "myException":
self.myException = ret = self.getException()
return ret
else:
raise AttributeError("no such attribute " + aname)
def __eq__(self, other):
if isinstance(other, str):
try:
(self + StringEnd()).parseString(str(other))
return True
except ParseBaseException:
return False
else:
return super() == other
    def __hash__(self):
        # Parser elements are mutable, so hash on identity only.
        return hash(id(self))
    def __req__(self, other):
        # Reflected equality delegates to __eq__.
        return self == other
class Token(ParserElement):
    """Abstract ParserElement subclass, for defining atomic matching patterns."""
    def __init__(self):
        super().__init__(savelist=False)
    def setName(self, name):
        """Set the element name and refresh the error message to match it."""
        ret = super().setName(name)
        self.errmsg = "Expected " + self.name
        return ret
class Empty(Token):
    """An empty token, will always match."""
    def __init__(self):
        super().__init__()
        self.name = "Empty"
        # Matches zero characters, so it can never raise an IndexError and
        # always "returns empty".
        self.mayIndexError = False
        self.mayReturnEmpty = True
class NoMatch(Token):
    """A token that will never match."""
    def __init__(self):
        super().__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"
    def parseImpl(self, instring, loc, doActions=True):
        # Always fail: fill in the cached per-element exception and raise it.
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__(self, matchString):
        super().__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # Empty match string: warn and degrade this instance into an
            # Empty token by swapping its class in place.
            warnings.warn(
                "null string passed to Literal; use Empty() instead",
                SyntaxWarning,
                stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % str(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    # ~ @profile
    def parseImpl(self, instring, loc, doActions=True):
        # Compare the first character before the (slower) startswith call.
        if (instring[loc] == self.firstMatchChar
            and (self.matchLen == 1 or instring.startswith(self.match, loc))):
            return loc+self.matchLen, self.match
        # ~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
_L = Literal  # short alias for Literal, kept for backward compatibility
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character. Compare with Literal::
    Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
    Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
    Accepts two optional constructor arguments in addition to the keyword string:
    identChars is a string of characters that would be valid identifier characters,
    defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive
    matching, default is False.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__(self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False):
        super().__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # NOTE(review): unlike Literal, this only warns on an empty match
            # string; firstMatchChar is left unset, so a later parseImpl call
            # would raise AttributeError — TODO confirm intended.
            warnings.warn(
                "null string passed to Keyword; use Empty() instead",
                SyntaxWarning,
                stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # Caseless comparisons are done against upper-cased text.
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = _str2dict(identChars)
    def parseImpl(self, instring, loc, doActions=True):
        if self.caseless:
            # Match text, then require a non-identifier char (or end) on both sides.
            if ((instring[loc:loc+self.matchLen].upper() == self.caselessmatch)
                and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars)
                and (loc == 0 or instring[loc-1].upper() not in self.identChars)):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar
                and (self.matchLen == 1 or instring.startswith(self.match, loc))
                and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars)
                and (loc == 0 or instring[loc-1] not in self.identChars)):
                return loc+self.matchLen, self.match
        # ~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def copy(self):
        # Copies revert to the class-default keyword characters.
        c = super().copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    def setDefaultKeywordChars(chars):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.
    """
    def __init__(self, matchString):
        # Store the upper-cased form as the comparison key.
        super().__init__(matchString.upper())
        # Preserve the defining literal.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name
    def parseImpl(self, instring, loc, doActions=True):
        # Compare upper-cased slice; return the original-case literal.
        if instring[loc:loc+self.matchLen].upper() == self.match:
            return loc+self.matchLen, self.returnString
        # ~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class CaselessKeyword(Keyword):
    # Case-insensitive Keyword; like Keyword, requires a non-identifier
    # character (or end of string) after the match.
    def __init__(self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS):
        super().__init__(matchString, identChars, caseless=True)
    def parseImpl(self, instring, loc, doActions=True):
        # NOTE(review): unlike Keyword.parseImpl, this does not check the
        # character *before* loc — TODO confirm intended.
        if ((instring[loc:loc+self.matchLen].upper() == self.caselessmatch)
            and (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars)):
            return loc+self.matchLen, self.match
        # ~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length. The default value for min is 1 (a
    minimum value < 1 is not valid); the default values for max and exact
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False):
        super().__init__()
        self.initCharsOrig = initChars
        self.initChars = _str2dict(initChars)
        if bodyChars:
            self.bodyCharsOrig = bodyChars
            self.bodyChars = _str2dict(bodyChars)
        else:
            # Body characters default to the initial character set.
            self.bodyCharsOrig = initChars
            self.bodyChars = _str2dict(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max.
            self.maxLen = exact
            self.minLen = exact
        self.name = str(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # Fast path: with no length constraints and no space characters, the
        # word can be matched with a single compiled regex.
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "{}[{}]*".format(
                    re.escape(self.initCharsOrig),
                    _escapeRegexRangeChars(self.bodyCharsOrig))
            else:
                self.reString = "[{}][{}]*".format(
                    _escapeRegexRangeChars(self.initCharsOrig),
                    _escapeRegexRangeChars(self.bodyCharsOrig))
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile(self.reString)
            except Exception:
                # Fall back to the character-by-character scan below.
                self.re = None
    def parseImpl(self, instring, loc, doActions=True):
        if self.re:
            # Regex fast path.
            result = self.re.match(instring, loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc, result.group()
        if not(instring[loc] in self.initChars):
            # ~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # Manual scan: consume body characters up to maxLen.
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min(maxloc, instrlen)
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # More body characters follow than max allows.
            throwException = True
        if self.asKeyword:
            # Keyword mode: must not abut body characters on either side.
            if (start > 0 and instring[start-1] in bodychars) or (loc < instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            # ~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__(self):
        try:
            return super().__str__()
        except Exception:
            pass
        if self.strRepr is None:
            # Abbreviate long character sets in the repr.
            def charsAsStr(s):
                if len(s) > 4:
                    return s[:4]+"..."
                else:
                    return s
            if self.initCharsOrig != self.bodyCharsOrig:
                self.strRepr = "W:({},{})".format(charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig))
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    def __init__(self, pattern, flags=0):
        """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
        super().__init__()
        if len(pattern) == 0:
            warnings.warn(
                "null string passed to Regex; use Empty() instead",
                SyntaxWarning,
                stacklevel=2)
        self.pattern = pattern
        self.flags = flags
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            # Invalid pattern: warn with context, then re-raise.
            warnings.warn(
                "invalid pattern (%s) passed to Regex" % pattern,
                SyntaxWarning,
                stacklevel=2)
            raise
        self.name = str(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl(self, instring, loc, doActions=True):
        result = self.re.match(instring, loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        # Expose named regex groups as named results.
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            for k in d:
                ret[k] = d[k]
        return loc, ret
    def __str__(self):
        try:
            return super().__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
        Defined with the following parameters:
        - quoteChar - string of one or more characters defining the quote delimiting string
        - escChar - character to escape quotes, typically backslash (default=None)
        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
        - multiline - boolean indicating whether quotes can span multiple lines (default=False)
        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super().__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # Build a single regex that matches the whole quoted string: an
        # opening quote, a body of non-terminating characters (optionally
        # including escapes), and the closing quote.
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'{}(?:[^{}{}]'.format(
                re.escape(self.quoteChar),
                _escapeRegexRangeChars(self.endQuoteChar[0]),
                (escChar is not None and _escapeRegexRangeChars(escChar) or ''))
        else:
            self.flags = 0
            # Single-line mode additionally excludes newlines from the body.
            self.pattern = r'{}(?:[^{}\n\r{}]'.format(
                re.escape(self.quoteChar),
                _escapeRegexRangeChars(self.endQuoteChar[0]),
                (escChar is not None and _escapeRegexRangeChars(escChar) or ''))
        if len(self.endQuoteChar) > 1:
            # Multi-char end quote: allow any proper prefix of it in the body
            # as long as the next char breaks the sequence (longest first).
            self.pattern += (
                '|(?:' + ')|(?:'.join(["{}[^{}]".format(
                    re.escape(self.endQuoteChar[:i]),
                    _escapeRegexRangeChars(self.endQuoteChar[i])
                ) for i in range(len(self.endQuoteChar)-1, 0, -1)]) + ')')
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn(
                "invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning,
                stacklevel=2)
            raise
        self.name = str(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl(self, instring, loc, doActions=True):
        # Cheap first-character test before running the full regex.
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring, loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret, str):
                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__(self):
        try:
            return super().__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = f"quoted string, starting with {self.quoteChar} ending with {self.endQuoteChar}"
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length. The default value for min is 1 (a
    minimum value < 1 is not valid); the default values for max and exact
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__(self, notChars, min=1, max=0, exact=0):
        super().__init__()
        # Whitespace is significant here — it may itself be a matched char.
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max.
            self.maxLen = exact
            self.minLen = exact
        self.name = str(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = (self.minLen == 0)
        self.mayIndexError = False
    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] in self.notChars:
            # ~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # Consume characters outside the disallowed set, up to maxLen.
        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min(start+self.maxLen, len(instring))
        while loc < maxlen and (instring[loc] not in notchars):
            loc += 1
        if loc - start < self.minLen:
            # ~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__(self):
        try:
            return super().__str__()
        except Exception:
            pass
        if self.strRepr is None:
            # Abbreviate long character sets in the repr.
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace. Normally, whitespace is ignored
    by pyparsing grammars. This class is included when some whitespace structures
    are significant. Define with a string containing the whitespace characters to be
    matched; default is " \\t\\r\\n". Also takes optional min, max, and exact arguments,
    as defined for the Word class."""
    # Display names for each recognized whitespace character.
    whiteStrs = {
        " ": "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
    }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super().__init__()
        self.matchWhite = ws
        # Chars we match must not also be skipped as inter-token whitespace.
        self.setWhitespaceChars("".join([c for c in self.whiteChars if c not in self.matchWhite]))
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max.
            self.maxLen = exact
            self.minLen = exact
    def parseImpl(self, instring, loc, doActions=True):
        if not(instring[loc] in self.matchWhite):
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # Consume whitespace characters up to maxLen.
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min(maxloc, len(instring))
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            # ~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
class _PositionToken(Token):
    """Base class for tokens that match a parse position rather than text."""
    def __init__(self):
        super().__init__()
        self.name = self.__class__.__name__
        self.mayIndexError = False
        self.mayReturnEmpty = True
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__(self, colno):
        super().__init__()
        self.col = colno
    def preParse(self, instring, loc):
        # Skip whitespace (and ignorables) until the target column is reached
        # or the whitespace run ends.
        if col(loc, instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables(instring, loc)
            while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col:
                loc += 1
        return loc
    def parseImpl(self, instring, loc, doActions=True):
        thiscol = col(loc, instring)
        if thiscol > self.col:
            # Already past the target column — cannot back up.
            raise ParseException(instring, loc, "Text not in expected column", self)
        newloc = loc + self.col - thiscol
        # Return the skipped-over text as the token.
        ret = instring[loc: newloc]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__(self):
        super().__init__()
        # Only skip intra-line whitespace; newlines are significant here.
        self.setWhitespaceChars(" \t")
        self.errmsg = "Expected start of line"
    def preParse(self, instring, loc):
        preloc = super().preParse(instring, loc)
        # NOTE(review): advances the *original* loc past a newline found at
        # preloc, not preloc itself — TODO confirm intended.
        if instring[preloc] == "\n":
            loc += 1
        return loc
    def parseImpl(self, instring, loc, doActions=True):
        # Succeed at string start, after a newline, or when only skippable
        # whitespace precedes loc.
        if not(loc == 0
               or (loc == self.preParse(instring, 0))
               or (instring[loc-1] == "\n")): # col(loc, instring) != 1:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__(self):
        super().__init__()
        # Only skip intra-line whitespace; newlines are significant here.
        self.setWhitespaceChars(" \t")
        self.errmsg = "Expected end of line"
    def parseImpl(self, instring, loc, doActions=True):
        if loc < len(instring):
            if instring[loc] == "\n":
                # Consume the newline and return it as the token.
                return loc+1, "\n"
            else:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # End of string also counts as end of line.
            return loc+1, []
        else:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""
    def __init__(self):
        super().__init__()
        self.errmsg = "Expected start of text"
    def parseImpl(self, instring, loc, doActions=True):
        # Succeed at position 0, or when everything before loc is just
        # skippable whitespace/ignorables.
        if loc != 0 and loc != self.preParse(instring, 0):
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__(self):
        super().__init__()
        self.errmsg = "Expected end of text"
    def parseImpl(self, instring, loc, doActions=True):
        # Fail anywhere before the end of the string.
        if loc < len(instring):
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        # Exactly at the end: advance past it (mirrors LineEnd's behavior).
        if loc == len(instring):
            return loc+1, []
        # loc > len(instring): already past the end, match in place.
        # (The original had a fourth `else` raise branch here, which was
        # unreachable: <, ==, > exhaust all cases.)
        return loc, []
class WordStart(_PositionToken):
    r"""Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of wordChars
    (default=printables). To emulate the \b behavior of regular expressions,
    use WordStart(alphanums). WordStart will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars=printables):
        super().__init__()
        self.wordChars = _str2dict(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True):
        # loc == 0 always matches; otherwise the previous char must be a
        # non-word char and the current char a word char.
        if loc != 0:
            if (instring[loc-1] in self.wordChars
                or instring[loc] not in self.wordChars):
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class WordEnd(_PositionToken):
    r"""Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of wordChars
    (default=printables). To emulate the \b behavior of regular expressions,
    use WordEnd(alphanums). WordEnd will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars=printables):
        super().__init__()
        self.wordChars = _str2dict(wordChars)
        # Trailing whitespace is significant for end-of-word detection.
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True):
        instrlen = len(instring)
        # End of string always matches; otherwise the current char must be a
        # non-word char and the previous char a word char.
        if instrlen > 0 and loc < instrlen:
            if (instring[loc] in self.wordChars
                or instring[loc-1] not in self.wordChars):
                # ~ raise ParseException( instring, loc, "Expected end of word" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__(self, exprs, savelist=False):
        super().__init__(savelist)
        # Normalize exprs to a list; bare strings become Literals.
        if isinstance(exprs, list):
            self.exprs = exprs
        elif isinstance(exprs, str):
            self.exprs = [Literal(exprs)]
        else:
            self.exprs = [exprs]
        self.callPreparse = False
    def __getitem__(self, i):
        return self.exprs[i]
    def append(self, other):
        self.exprs.append(other)
        # Invalidate cached string repr.
        self.strRepr = None
        return self
    def leaveWhitespace(self):
        """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
        all contained expressions."""
        self.skipWhitespace = False
        # Copy sub-expressions first so shared elements elsewhere are unaffected.
        self.exprs = [e.copy() for e in self.exprs]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore(self, other):
        # Register the ignorable on self, then propagate the (possibly
        # wrapped) expression just appended to all sub-expressions.
        if isinstance(other, Suppress):
            if other not in self.ignoreExprs:
                super().ignore(other)
                for e in self.exprs:
                    e.ignore(self.ignoreExprs[-1])
        else:
            super().ignore(other)
            for e in self.exprs:
                e.ignore(self.ignoreExprs[-1])
        return self
    def __str__(self):
        try:
            return super().__str__()
        except Exception:
            pass
        if self.strRepr is None:
            self.strRepr = "{}:({})".format(self.__class__.__name__, str(self.exprs))
        return self.strRepr
    def streamline(self):
        super().streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if len(self.exprs) == 2:
            other = self.exprs[0]
            if (isinstance(other, self.__class__)
                    and not(other.parseAction)
                    and other.resultsName is None
                    and not other.debug):
                self.exprs = other.exprs[:] + [self.exprs[1]]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
            other = self.exprs[-1]
            if (isinstance(other, self.__class__)
                    and not(other.parseAction)
                    and other.resultsName is None
                    and not other.debug):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
        return self
    def setResultsName(self, name, listAllMatches=False):
        # No extra behavior beyond the base class; kept as an override hook.
        ret = super().setResultsName(name, listAllMatches)
        return ret
    def validate(self, validateTrace=None):
        # Validate every sub-expression with self appended to the trace.
        if validateTrace is None:
            validateTrace = []
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion([])
class And(ParseExpression):
    """Requires all given ParseExpressions to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the '+' operator.
    """
    # _ErrorStop is a singleton marker: encountering it in self.exprs switches
    # subsequent parse failures from backtrackable ParseException to fatal
    # ParseSyntaxException.
    class _ErrorStop(Empty):
        def __new__(cls, *args, **kwargs):
            return And._ErrorStop.instance
    _ErrorStop.instance = Empty()
    _ErrorStop.instance.leaveWhitespace()
    def __init__(self, exprs, savelist=True):
        super().__init__(exprs, savelist)
        # And may return empty only if every sub-expression may.
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # Adopt whitespace handling of the first sub-expression.
        self.setWhitespaceChars(exprs[0].whiteChars)
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl(self, instring, loc, doActions=True):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False)
        errorStop = False
        for e in self.exprs[1:]:
            if e is And._ErrorStop.instance:
                errorStop = True
                continue
            if errorStop:
                # Past the error stop: failures are not backtrackable.
                try:
                    loc, exprtokens = e._parse(instring, loc, doActions)
                except ParseBaseException as pe:
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException(ParseException(instring, len(instring), self.errmsg, self))
            else:
                loc, exprtokens = e._parse(instring, loc, doActions)
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other):
        if isinstance(other, str):
            other = Literal(other)
        return self.append(other) # And( [ self, other ] )
    def checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)
            # Once an element must consume input, later elements can't recurse
            # back to the same position.
            if not e.mayReturnEmpty:
                break
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join([str(e) for e in self.exprs]) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one ParseExpression is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the '^' operator.
    """
    def __init__(self, exprs, savelist=False):
        super().__init__(exprs, savelist)
        # Or may return empty if any alternative may.
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl(self, instring, loc, doActions=True):
        maxExcLoc = -1
        maxMatchLoc = -1
        maxException = None
        # Try every alternative; remember the one matching the most input,
        # and the failure that got the furthest (for error reporting).
        for e in self.exprs:
            try:
                loc2 = e.tryParse(instring, loc)
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring, len(instring), e.errmsg, self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            # No alternative matched.
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # Re-parse the winning alternative with actions enabled.
        return maxMatchExp._parse(instring, loc, doActions)
    def __ixor__(self, other):
        if isinstance(other, str):
            other = Literal(other)
        return self.append(other) # Or( [ self, other ] )
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join([str(e) for e in self.exprs]) + "}"
        return self.strRepr
    def checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)
class MatchFirst(ParseExpression):
    """Requires that at least one ParseExpression is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the '|' operator.
    """
    def __init__(self, exprs, savelist=False):
        super().__init__(exprs, savelist)
        if exprs:
            # MatchFirst may return empty if any alternative may.
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            self.mayReturnEmpty = True
    def parseImpl(self, instring, loc, doActions=True):
        maxExcLoc = -1
        maxException = None
        # First alternative to parse wins; remember the failure that got the
        # furthest for error reporting.
        for e in self.exprs:
            try:
                ret = e._parse(instring, loc, doActions)
                return ret
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring, len(instring), e.errmsg, self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        # (note: this is the for-loop's `else` clause — it runs only when the
        # loop completed without returning)
        else:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other):
        if isinstance(other, str):
            other = Literal(other)
        return self.append(other) # MatchFirst( [ self, other ] )
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join([str(e) for e in self.exprs]) + "}"
        return self.strRepr
    def checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)
class Each(ParseExpression):
    """Requires all given ParseExpressions to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the '&' operator.
    """
    def __init__(self, exprs, savelist=True):
        super().__init__(exprs, savelist)
        # Each may return empty only if every sub-expression may.
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        self.initExprGroups = True
    def parseImpl(self, instring, loc, doActions=True):
        # Lazily partition sub-expressions into optional / repeatable /
        # required groups on first parse (cached on the instance).
        if self.initExprGroups:
            self.optionals = [e.expr for e in self.exprs if isinstance(e, Optional)]
            self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)]
            self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)]
            self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt = self.optionals[:]
        matchOrder = []
        # Phase 1: discover an order in which all elements match (tryParse
        # only — no parse actions fire yet).
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse(instring, tmpLoc)
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                # A full pass with no progress: stop.
                keepMatching = False
        if tmpReqd:
            missing = ", ".join([str(e) for e in tmpReqd])
            raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing)
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += list(e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt)
        # Phase 2: re-parse in the discovered order, with actions enabled.
        resultlist = []
        for e in matchOrder:
            loc, results = e._parse(instring, loc, doActions)
            resultlist.append(results)
        # Merge results, accumulating duplicate named results into lists.
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k, v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join([str(e) for e in self.exprs]) + "}"
        return self.strRepr
    def checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)
class ParseElementEnhance(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.

    Wraps a single sub-expression; `self.expr` may be None, and every method
    guards against that case.
    """
    def __init__(self, expr, savelist=False):
        super().__init__(savelist)
        if isinstance(expr, str):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # Inherit parsing characteristics from the wrapped expression.
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars(expr.whiteChars)
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl(self, instring, loc, doActions=True):
        if self.expr is not None:
            return self.expr._parse(instring, loc, doActions, callPreParse=False)
        else:
            raise ParseException("", loc, self.errmsg, self)
    def leaveWhitespace(self):
        self.skipWhitespace = False
        # BUG FIX: the original called self.expr.copy() *before* checking for
        # None, raising AttributeError when expr was None even though every
        # other method here tolerates a None expr.
        if self.expr is not None:
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self
    def ignore(self, other):
        # Register the ignorable on self, then propagate the (possibly
        # wrapped) expression just appended to the wrapped expression.
        if isinstance(other, Suppress):
            if other not in self.ignoreExprs:
                super().ignore(other)
                if self.expr is not None:
                    self.expr.ignore(self.ignoreExprs[-1])
        else:
            super().ignore(other)
            if self.expr is not None:
                self.expr.ignore(self.ignoreExprs[-1])
        return self
    def streamline(self):
        super().streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion(self, parseElementList):
        # Seeing ourselves again without consuming input is a left-recursive
        # (infinite) grammar.
        if self in parseElementList:
            raise RecursiveGrammarException(parseElementList+[self])
        subRecCheckList = parseElementList[:] + [self]
        if self.expr is not None:
            self.expr.checkRecursion(subRecCheckList)
    def validate(self, validateTrace=None):
        if validateTrace is None:
            validateTrace = []
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__(self):
        try:
            return super().__str__()
        except Exception:
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "{}:({})".format(self.__class__.__name__, str(self.expr))
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Positive lookahead for the given parse expression.

    Succeeds only when the wrapped expression matches at the current
    position, but never consumes input and always yields a null token list.
    """
    def __init__(self, expr):
        super().__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl(self, instring, loc, doActions=True):
        # tryParse raises on failure; on success we deliberately ignore its
        # result and report no advancement and no tokens
        self.expr.tryParse(instring, loc)
        return loc, []
class NotAny(ParseElementEnhance):
    """Lookahead to disallow matching with the given parse expression. NotAny
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression does *not* match at the current
    position. Also, NotAny does *not* skip over leading whitespace. NotAny
    always returns a null token list. May be constructed using the '~' operator."""
    def __init__(self, expr):
        super().__init__(expr)
        self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        # matching "nothing" is a success for a negative lookahead
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+str(self.expr)
    def parseImpl(self, instring, loc, doActions=True):
        try:
            self.expr.tryParse(instring, loc)
        except (ParseException, IndexError):
            # inner expression did not match - exactly what NotAny wants
            pass
        else:
            # inner expression matched: fail, reusing the cached exception object
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + str(self.expr) + "}"
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__(self, expr):
        super().__init__(expr)
        # zero occurrences is still a successful match
        self.mayReturnEmpty = True
    def parseImpl(self, instring, loc, doActions=True):
        tokens = []
        try:
            # first match skips preparse (the caller already performed it)
            loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
            hasIgnoreExprs = (len(self.ignoreExprs) > 0)
            while True:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables(instring, loc)
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse(instring, preloc, doActions)
                # accumulate only non-empty results (named empties count too)
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException, IndexError):
            # the repetition ends at the first non-match
            pass
        return loc, tokens
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + str(self.expr) + "]..."
        return self.strRepr
    def setResultsName(self, name, listAllMatches=False):
        # repeated matches should accumulate into a list under the results name
        ret = super().setResultsName(name, listAllMatches)
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl(self, instring, loc, doActions=True):
        # must be at least one - a failure here propagates to the caller
        loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
        try:
            hasIgnoreExprs = (len(self.ignoreExprs) > 0)
            while True:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables(instring, loc)
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse(instring, preloc, doActions)
                # accumulate only non-empty results (named empties count too)
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException, IndexError):
            # subsequent non-match just ends the repetition
            pass
        return loc, tokens
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + str(self.expr) + "}..."
        return self.strRepr
    def setResultsName(self, name, listAllMatches=False):
        # repeated matches should accumulate into a list under the results name
        ret = super().setResultsName(name, listAllMatches)
        ret.saveAsList = True
        return ret
class _NullToken:
    """Falsy placeholder object that renders as an empty string.

    Used as the "no default supplied" sentinel for Optional.
    """
    def __bool__(self):
        return False
    __nonzero__ = __bool__  # Python 2 spelling, kept as an alias
    def __str__(self):
        return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
    A default return string can also be specified, if the optional expression
    is not found.
    """
    # NOTE(review): the first parameter holds a single expression despite the
    # plural name 'exprs'; left unchanged because callers may pass it by keyword.
    def __init__(self, exprs, default=_optionalNotMatched):
        super().__init__(exprs, savelist=False)
        # the _optionalNotMatched sentinel distinguishes "no default supplied"
        # from an explicit default of None/""/0
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl(self, instring, loc, doActions=True):
        try:
            loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
        except (ParseException, IndexError):
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    # expose the default under the contained expression's results name
                    tokens = ParseResults([self.defaultValue])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [self.defaultValue]
            else:
                tokens = []
        return loc, tokens
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + str(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
    If include is set to true, the matched expression is also consumed. The ignore
    argument is used to define grammars (typically quoted strings and comments) that
    might contain false matches.
    """
    def __init__(self, other, include=False, ignore=None):
        super().__init__(other)
        if ignore is not None:
            # copy before mutating so the caller's expression is untouched
            self.expr = self.expr.copy()
            self.expr.ignore(ignore)
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        self.errmsg = "No match found for "+str(self.expr)
    def parseImpl(self, instring, loc, doActions=True):
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        # scan forward one character at a time until the target matches
        while loc <= instrlen:
            try:
                loc = expr._skipIgnorables(instring, loc)
                # probe without actions first; only re-parse with actions when
                # the match is actually being consumed (includeMatch)
                expr._parse(instring, loc, doActions=False, callPreParse=False)
                if self.includeMatch:
                    skipText = instring[startLoc:loc]
                    loc, mat = expr._parse(instring, loc, doActions, callPreParse=False)
                    if mat:
                        skipRes = ParseResults(skipText)
                        skipRes += mat
                        return loc, [skipRes]
                    else:
                        return loc, [skipText]
                else:
                    return loc, [instring[startLoc:loc]]
            except (ParseException, IndexError):
                loc += 1
        # scanned to end of string without finding the target expression
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the Forward variable using the '<<' operator.
    Note: take care when assigning to Forward not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives. It is recommended that you
    explicitly group the values inserted into the Forward::
        fwdExpr << (a | b | c)
    """
    def __init__(self, other=None):
        super().__init__(other, savelist=False)
    def __lshift__(self, other):
        # bind the deferred expression and copy its parsing characteristics
        if isinstance(other, str):
            other = Literal(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        # NOTE(review): mayReturnEmpty is assigned twice (above and here);
        # redundant but harmless
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars(self.expr.whiteChars)
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return None
    def leaveWhitespace(self):
        # unlike the base class, do not copy/propagate into self.expr - it may
        # not be assigned yet
        self.skipWhitespace = False
        return self
    def streamline(self):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate(self, validateTrace=None):
        if validateTrace is None:
            validateTrace = []
        # guard against infinite descent through self-referencing grammars
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        # temporarily rebrand as _ForwardNoRecurse so a recursive grammar
        # renders as "..." instead of recursing forever
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = str(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = Forward
        return "Forward: "+retString
    def copy(self):
        if self.expr is not None:
            return super().copy()
        else:
            # not yet assigned: return a new Forward deferring to this one
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    """Temporary stand-in class used by Forward.__str__ to cut rendering cycles."""
    def __str__(self):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of ParseElementEnhance for post-processing parsed tokens."""
    def __init__(self, expr, savelist=False):
        # savelist is accepted for interface compatibility but deliberately
        # not forwarded to the base class
        super().__init__(expr)
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.

    Deprecated; attach the upcaseTokens parse action instead.
    """
    def __init__(self, *args):
        super().__init__(*args)
        warnings.warn(
            "Upcase class is deprecated, use upcaseTokens parse action instead",
            DeprecationWarning,
            stacklevel=2)
    def postParse(self, instring, loc, tokenlist):
        # bugfix: the Python 3 'string' module has no 'upper' function, so
        # map(string.upper, ...) raised AttributeError; use the str method.
        return list(map(str.upper, tokenlist))
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying 'adjacent=False' in the constructor.
    """
    def __init__(self, expr, joinString="", adjacent=True):
        super().__init__(expr)
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
    def ignore(self, other):
        if self.adjacent:
            # adjacent mode: register on this element only, do not propagate
            # into the contained expression (base class would)
            ParserElement.ignore(self, other)
        else:
            super().ignore(other)
        return self
    def postParse(self, instring, loc, tokenlist):
        # rebuild the results as a single joined string, preserving modality
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults)
        if self.resultsName and len(retToks.keys()) > 0:
            return [retToks]
        else:
            return retToks
class Group(TokenConverter):
    """Converter that nests the matched tokens inside their own sublist.

    Handy for keeping the results of ZeroOrMore/OneOrMore expressions grouped.
    """
    def __init__(self, expr):
        super().__init__(expr)
        self.saveAsList = True
    def postParse(self, instring, loc, tokenlist):
        # wrap the whole result set as a single element
        return [tokenlist]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__(self, exprs):
        super().__init__(exprs)
        self.saveAsList = True
    def postParse(self, instring, loc, tokenlist):
        for i, tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            # the first token of each group is the dictionary key
            ikey = tok[0]
            if isinstance(ikey, int):
                ikey = str(tok[0]).strip()
            if len(tok) == 1:
                # key only, no value
                tokenlist[ikey] = _ParseResultsWithOffset("", i)
            elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
            else:
                # multi-token or structured value: store everything after the key
                dictvalue = tok.copy() # ParseResults(i)
                del dictvalue[0]
                if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
        if self.resultsName:
            return [tokenlist]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter that discards the tokens matched by the wrapped expression."""
    def postParse(self, instring, loc, tokenlist):
        # drop whatever was matched
        return []
    def suppress(self):
        # already a Suppress; nothing more to wrap
        return self
class OnlyOnce:
    """Parse-action wrapper that permits exactly one successful invocation.

    Subsequent calls raise ParseException until reset() is called.
    """
    def __init__(self, methodCall):
        # normalize to the standard (s, l, t) parse-action signature
        self.callable = ParserElement._normalizeParseActionArgs(methodCall)
        self.called = False
    def __call__(self, s, l, t):
        if self.called:
            raise ParseException(s, l, "")
        # mark as called only after the wrapped action succeeds
        results = self.callable(s, l, t)
        self.called = True
        return results
    def reset(self):
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.

    Wraps a parse action so entry and exit (return value or exception)
    are reported on sys.stderr.
    """
    f = ParserElement._normalizeParseActionArgs(f)
    def z(*paArgs):
        # bugfix: 'func_name' is Python 2 only; use __name__, falling back
        # gracefully in case the normalized action carries no __name__
        thisFunc = getattr(f, "__name__", "parseAction")
        s, l, t = paArgs[-3:]
        if len(paArgs) > 3:
            # bound method: prefix with the owning class name
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write(">>entering %s(line: '%s', %d, %s)\n" % (thisFunc, line(l, s), l, t))
        try:
            ret = f(*paArgs)
        except Exception as exc:
            sys.stderr.write(f"<<leaving {thisFunc} (exception: {exc})\n")
            raise
        sys.stderr.write(f"<<leaving {thisFunc} (ret: {ret})\n")
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.

    By default the elements and delimiters may be separated by whitespace and
    comments.  With combine=True the entire list (delimiters included) is
    returned as one combined token string; otherwise the delimiters are
    suppressed and the elements are returned individually.
    """
    dlName = "%s [%s %s]..." % (expr, delim, expr)
    if combine:
        listExpr = Combine(expr + ZeroOrMore(delim + expr))
    else:
        listExpr = expr + ZeroOrMore(Suppress(delim) + expr)
    return listExpr.setName(dlName)
def countedArray(expr):
    """Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s, l, t):
        # rebind the Forward at parse time to exactly n copies of expr
        # (or the empty expression when the count is zero)
        n = int(t[0])
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        # returning [] suppresses the count token itself
        return []
    return (Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr)
def _flatten(L):
    """Recursively flatten an arbitrarily nested list into a single flat list."""
    if not isinstance(L, list):
        return [L]
    flat = []
    for item in L:
        flat.extend(_flatten(item))
    return flat
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression. For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match "1:1", but not "1:2". Because this matches a
    previous literal, will also match the leading "1:1" in "1:10".
    If this is not desired, use matchPreviousExpr.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s, l, t):
        # rebind the Forward at parse time to literal(s) of what just matched
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And([Literal(tt) for tt in tflat])
        else:
            # previous expression matched nothing: repeat matches nothing too
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression. For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match "1:1", but not "1:2". Because this matches by
    expressions, will *not* match the leading "1:1" in "1:10";
    the expressions are evaluated first, and then compared, so
    "1" is compared with "10".
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    # re-parse with a copy of the expression, then verify the token lists agree
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s, l, t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s, l, t):
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                raise ParseException("", 0, "")
        # installed at parse time so it closes over the tokens just matched
        rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape the characters that are special inside a regex [] range."""
    # escape these chars: ^-] and the backslash itself
    for ch in r"\^-]":
        s = s.replace(ch, "\\" + ch)
    s = s.replace("\n", r"\n").replace("\t", r"\t")
    return str(s)
def oneOf(strs, caseless=False, useRegex=True):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a MatchFirst for best performance.
    Parameters:
    - strs - a string of space-delimited literals, or a list of string literals
    - caseless - (default=False) - treat all literals as caseless
    - useRegex - (default=True) - as an optimization, will generate a Regex
      object; otherwise, will generate a MatchFirst object (if caseless=True, or
      if creating a Regex raises an exception)
    """
    if caseless:
        isequal = (lambda a, b: a.upper() == b.upper())
        masks = (lambda a, b: b.upper().startswith(a.upper()))
        parseElementClass = CaselessLiteral
    else:
        isequal = (lambda a, b: a == b)
        masks = (lambda a, b: b.startswith(a))
        parseElementClass = Literal
    if isinstance(strs, (list, tuple)):
        symbols = strs[:]
    elif isinstance(strs, str):
        symbols = strs.split()
    else:
        # NOTE(review): this branch only warns and leaves 'symbols' unbound,
        # so the loop below raises NameError; confirm whether a TypeError
        # should be raised here instead.
        warnings.warn(
            "Invalid argument to oneOf, expected string or list",
            SyntaxWarning,
            stacklevel=2)
    # remove duplicates and reorder so that a symbol masked by an earlier,
    # shorter prefix is tried first (longest-first matching)
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j, other in enumerate(symbols[i+1:]):
            if isequal(other, cur):
                del symbols[i+j+1]
                break
            elif masks(cur, other):
                del symbols[i+j+1]
                symbols.insert(i, other)
                cur = other
                break
        else:
            i += 1
    if not caseless and useRegex:
        # ~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            # all single-character symbols collapse into one character class
            if len(symbols) == len("".join(symbols)):
                return Regex("[%s]" % "".join([_escapeRegexRangeChars(sym) for sym in symbols]))
            else:
                return Regex("|".join([re.escape(sym) for sym in symbols]))
        except Exception:
            warnings.warn(
                "Exception creating Regex for oneOf, building MatchFirst",
                SyntaxWarning,
                stacklevel=2)
    # last resort, just use MatchFirst
    return MatchFirst([parseElementClass(sym) for sym in symbols])
def dictOf(key, value):
    """Helper to easily and clearly define a dictionary from key and value patterns.

    Wraps the repeated key/value pairs in Dict, ZeroOrMore and Group in the
    proper order.  The key pattern may include suppressed punctuation, leaving
    just the significant key text; the value pattern may include named results
    so the Dict results carry named token fields.
    """
    entry = Group(key + value)
    return Dict(ZeroOrMore(entry))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
# building blocks for srange(): escaped punctuation, \0x../\0.. hex and octal
# escapes, single characters, character ranges, and the full "[...]" expression
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1])
_printables_less_backslash = "".join([c for c in printables if c not in r"\]"])
_escapedHexChar = Combine(Suppress(_bslash + "0x") + Word(hexnums)).setParseAction(lambda s, l, t: chr(int(t[0], 16)))
_escapedOctChar = Combine(Suppress(_bslash) + Word("0", "01234567")).setParseAction(lambda s, l, t: chr(int(t[0], 8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash, exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]"
def _expanded(p):
    """Expand a two-element ParseResults range into its characters; pass single chars through."""
    if isinstance(p, ParseResults):
        chars = ''.join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
        # an empty expansion (reversed range) falls through to return p,
        # matching the original and/or short-circuit behavior
        if chars:
            return chars
    return p
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction. Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]") -> "0123456789"
        srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be::
        a single character
        an escaped character with a leading backslash (such as \- or \])
        an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
        an escaped octal character with a leading '\0' (\041, which is a '!' character)
        a range of any of the above, separated by a dash ('a-z', etc.)
        any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    try:
        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
    except Exception:
        # any malformed range silently yields an empty string
        return ""
def matchOnlyAtCol(n):
    """Return a parse action that fails unless the match starts at column n."""
    def verify_col(strg, locn, toks):
        if col(locn, strg) != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return verify_col
def replaceWith(replStr):
    """Return a parse action that always yields the literal value [replStr].

    Especially useful with transformString().
    """
    def _substitute(*_args):
        return [replStr]
    return _substitute
def removeQuotes(s, l, t):
    """Parse action that strips the enclosing quotation marks from a matched
    quoted string.

    To use, attach to a quoted string expression::
        quotedString.setParseAction( removeQuotes )
    """
    quoted = t[0]
    return quoted[1:-1]
def upcaseTokens(s, l, t):
    """Parse action that stringifies each token and converts it to upper case."""
    return [str(tt).upper() for tt in t]
def downcaseTokens(s, l, t):
    """Parse action that stringifies each token and converts it to lower case."""
    return [str(tt).lower() for tt in t]
def keepOriginalText(s, startLoc, t):
    """Helper parse action to preserve original parsed text,
    overriding any nested parse actions."""
    try:
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    # replace whatever nested actions produced with the raw matched slice
    del t[:]
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
    location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            if f[3] == "_parseNoCache":
                # read the parser's current location out of the caller's frame
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            # for/else: only reached when no _parseNoCache frame was found
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # break reference cycles held by the captured frames
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr, str):
        resname = tagStr
        # XML tag names are case-sensitive; HTML tag names are not
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas, alphanums+"_-:")
    if xml:
        # XML attributes must be double-quoted
        tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
        openTag = Suppress("<") + tagStr + \
            Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + \
            Optional("/", default=[False]).setResultsName("empty").setParseAction(lambda s, l, t: t[0] == '/') + Suppress(">")
    else:
        # HTML allows quoted or bare attribute values, and valueless attributes
        printablesLessRAbrack = "".join([c for c in printables if c not in ">"])
        tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr + \
            Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens) + Optional(Suppress("=") + tagAttrValue)))) + \
            Optional("/", default=[False]).setResultsName("empty").setParseAction(lambda s, l, t: t[0] == '/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # results names like "startDiv"/"endDiv", built from the title-cased tag name
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":", " ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":", " ").title().split())).setName("</%s>" % tagStr)
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Construct opening and closing tag expressions for an HTML tag name."""
    openTag, closeTag = _makeTags(tagStr, False)
    return openTag, closeTag
def makeXMLTags(tagStr):
    """Construct opening and closing tag expressions for an XML tag name."""
    openTag, closeTag = _makeTags(tagStr, True)
    return openTag, closeTag
def withAttribute(*args, **attrDict):
    """Helper to create a validating parse action for start tags created
    with makeXMLTags or makeHTMLTags.

    Use withAttribute to qualify a starting tag with a required attribute
    value, avoiding false matches on common tags such as <TD> or <DIV>.
    Specify the filter attributes either as keyword arguments, e.g.
    (class="Customer", align="right"), or - required for namespaced names -
    as a list of name-value tuples, e.g.
    ( ("ns1:class", "Customer"), ("ns2:align", "right") ).
    To require only that an attribute exists, regardless of value, pass
    withAttribute.ANY_VALUE as the value.
    """
    source = args if args else attrDict.items()
    attrs = [(k, v) for k, v in source]
    def pa(s, l, tokens):
        for attrName, attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s, l, "no matching attribute " + attrName)
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s, l, "attribute '{}' has value '{}', must be '{}'".format(attrName, tokens[attrName], attrValue))
    return pa
withAttribute.ANY_VALUE = object()
# associativity constants for operatorPrecedence(); compared by identity
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence(baseExpr, opList):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary or
    binary, left- or right-associative. Parse actions can also be attached
    to operator expressions.
    Parameters:
    - baseExpr - expression representing the most basic element for the nested
    - opList - list of tuples, one for each operator precedence level in the
      expression grammar; each tuple is of the form
      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
      - opExpr is the pyparsing expression for the operator;
        may also be a string, which will be converted to a Literal;
        if numTerms is 3, opExpr is a tuple of two expressions, for the
        two operators separating the 3 terms
      - numTerms is the number of terms for this operator (must
        be 1, 2, or 3)
      - rightLeftAssoc is the indicator whether the operator is
        right or left associative, using the pyparsing-defined
        constants opAssoc.RIGHT and opAssoc.LEFT.
      - parseAction is the parse action to be associated with
        expressions matching this operator expression (the
        parse action tuple member may be omitted)
    """
    ret = Forward()
    # the base level: an atom or a parenthesized full expression
    lastExpr = baseExpr | (Suppress('(') + ret + Suppress(')'))
    # build one grammar level per precedence entry; each level may reference
    # the previous (tighter-binding) level via lastExpr
    for operDef in opList:
        # pad with None so a 3-tuple (no parse action) unpacks cleanly
        opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward() # .setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                # postfix unary: operand followed by one or more operators
                matchExpr = FollowedBy(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr))
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr))
                else:
                    # implicit operator (juxtaposition)
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group(lastExpr + OneOrMore(lastExpr))
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                    Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
            elif arity == 2:
                if opExpr is not None:
                    # right-associative: the right operand recurses at this level
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr))
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr))
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                    Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction(pa)
        # a level matches its own operators, or falls through to tighter level
        thisExpr << (matchExpr | lastExpr)
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# pre-built quoted-string expressions; each accepts doubled-quote escapes,
# \xNN hex escapes, and generic backslash escapes within a single line
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
    """Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).
    Parameters:
    - opener - opening character for a nested list (default="("); can also be a pyparsing expression
    - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
    - content - expression for items within the nested lists (default=None)
    - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.
    Use the ignoreExpr argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression. Specify multiple expressions using an Or or MatchFirst.
    The default is quotedString, but if no expressions are to be ignored,
    then pass None for this argument.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # derive a default content expression: runs of characters that are
        # neither delimiters nor whitespace (and not part of ignoreExpr)
        if isinstance(opener, str) and isinstance(closer, str):
            if ignoreExpr is not None:
                content = (Combine(OneOrMore(~ignoreExpr + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS, exact=1)))
                           .setParseAction(lambda t: t[0].strip()))
            else:
                content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t: t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # the grammar is recursive: a nested list may contain further nested lists
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer))
    else:
        ret << Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.
    Parameters:
    - blockStatementExpr - expression defining syntax of statement that
      is repeated within the indented block
    - indentStack - list created by caller to manage indentation stack
      (multiple statementWithIndentedBlock expressions within a single grammar
      should share a common indentStack)
    - indent - boolean indicating whether block must be indented beyond the
      the current level; set to False for block of left-most statements
      (default=True)
    A valid block must contain at least one blockStatement.
    """
    # fail unless the current line starts at exactly the current indent level
    def checkPeerIndent(s, l, t):
        if l >= len(s):
            return
        curCol = col(l, s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s, l, "illegal nesting")
            raise ParseException(s, l, "not a peer entry")
    # require a deeper indent, pushing the new level onto the stack
    def checkSubIndent(s, l, t):
        curCol = col(l, s)
        if curCol > indentStack[-1]:
            indentStack.append(curCol)
        else:
            raise ParseException(s, l, "not a subentry")
    # require a shallower indent, popping the finished level off the stack
    def checkUnindent(s, l, t):
        if l >= len(s):
            return
        curCol = col(l, s)
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s, l, "not an unindent")
        indentStack.pop()
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group(
            Optional(NL) + FollowedBy(blockStatementExpr)
            + INDENT + (OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL))) + UNDENT)
    else:
        smExpr = Group(Optional(NL) + (OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL))))
    # allow backslash line-continuations inside block statements
    blockStatementExpr.ignore("\\" + LineEnd())
    return smExpr
# Latin-1 supplement letters, usable to extend `alphas` for 8-bit input.
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
# Latin-1 punctuation/symbol characters (the non-letter 8-bit range).
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

# Expressions matching any single HTML open/close tag pair.
anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums+"_:"))
# Matches a small set of common HTML character entities (e.g. "&amp;");
# the entity name is exposed via the "entity" results name.
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") + ";")
# Map of supported HTML entity names to their replacement characters.
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(), "><& '"))

def replaceHTMLEntity(t):
    """Parse action to replace a matched common HTML entity with its
    replacement character.

    Returns None (leaving the token unchanged) for unrecognized names.
    """
    # dict.get is clearer and safer than the old `cond and a or b` idiom,
    # which would misbehave if any mapped value were ever falsy.
    return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
# C-style block comment: /* ... */ (no nesting).
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
# HTML comment: <!-- ... --> (non-greedy, may span multiple lines).
htmlComment = Regex(r"<!--[\s\S]*?-->")
# Everything up to (but not including) the end of the current line.
restOfLine = Regex(r".*").leaveWhitespace()
# Single-line // comment; a trailing backslash continues it onto the next line.
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
# Either form of C++ comment: /* ... */ block or // line comment.
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
# All printable characters except the comma itself.
_noncomma = "".join([c for c in printables if c != ","])
# One item in a comma-separated list: may contain embedded spaces/tabs, but
# not an unquoted comma or a line break.
_commasepitem = Combine(OneOrMore(Word(_noncomma) + Optional(Word(" \t") + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")
# Comma-separated list of quoted strings and/or plain items; empty entries
# are preserved as "".
commaSeparatedList = delimitedList(Optional(quotedString | _commasepitem, default="")).setName("commaSeparatedList")
if __name__ == "__main__":

    def test(teststring):
        # Demo helper: parse the given SQL string with simpleSQL and dump the
        # resulting tokens; on failure, print a caret marking the error column.
        try:
            tokens = simpleSQL.parseString(teststring)
            tokenlist = tokens.asList()
            print(teststring + "->" + str(tokenlist))
            print("tokens = " + str(tokens))
            print("tokens.columns = " + str(tokens.columns))
            print("tokens.tables = " + str(tokens.tables))
            print(tokens.asXML("SQL", True))
        except ParseBaseException as err:
            print(teststring + "->")
            print(err.line)
            print(" "*(err.column-1) + "^")
            print(err)
        print()

    # Minimal SELECT-statement grammar used only for this self-test.
    selectToken = CaselessLiteral("select")
    fromToken = CaselessLiteral("from")
    ident = Word(alphas, alphanums + "_$")
    columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    columnNameList = Group(delimitedList(columnName))  # .setName("columns")
    tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
    tableNameList = Group(delimitedList(tableName))  # .setName("tables")
    simpleSQL = (
        selectToken
        + ('*' | columnNameList).setResultsName("columns")
        + fromToken
        + tableNameList.setResultsName("tables"))

    # Well-formed statements...
    test("SELECT * from XYZZY, ABC")
    test("select * from SYS.XYZZY")
    test("Select A from Sys.dual")
    test("Select AA,BB,CC from Sys.dual")
    test("Select A, B, C from Sys.dual")
    test("Select A, B, C from Sys.dual")
    # ...and deliberately malformed ones to exercise the error reporting.
    test("Xelect A, B, C from Sys.dual")
    test("Select A, B, C frox Sys.dual")
    test("Select")
    test("Select ^^^ frox Sys.dual")
    test("Select A, B, C from Sys.dual, Table2 ")
| mit |
FHannes/intellij-community | python/lib/Lib/encodings/ptcp154.py | 647 | 8950 | """ Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec mapping between PTCP154 bytes and Unicode charmaps."""
    def encode(self,input,errors='strict'):
        # Delegate to the C-level charmap codec using the generated table.
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so each chunk is encoded independently
    # and `final` can be ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so each chunk is decoded independently
    # and `final` can be ignored.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from the Codec/StreamWriter base classes.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from the Codec/StreamReader base classes.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register this codec as 'ptcp154'."""
    return codecs.CodecInfo(
        name='ptcp154',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0496, # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
0x0081: 0x0492, # CYRILLIC CAPITAL LETTER GHE WITH STROKE
0x0082: 0x04ee, # CYRILLIC CAPITAL LETTER U WITH MACRON
0x0083: 0x0493, # CYRILLIC SMALL LETTER GHE WITH STROKE
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x04b6, # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
0x0087: 0x04ae, # CYRILLIC CAPITAL LETTER STRAIGHT U
0x0088: 0x04b2, # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
0x0089: 0x04af, # CYRILLIC SMALL LETTER STRAIGHT U
0x008a: 0x04a0, # CYRILLIC CAPITAL LETTER BASHKIR KA
0x008b: 0x04e2, # CYRILLIC CAPITAL LETTER I WITH MACRON
0x008c: 0x04a2, # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
0x008d: 0x049a, # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
0x008e: 0x04ba, # CYRILLIC CAPITAL LETTER SHHA
0x008f: 0x04b8, # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
0x0090: 0x0497, # CYRILLIC SMALL LETTER ZHE WITH DESCENDER
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x04b3, # CYRILLIC SMALL LETTER HA WITH DESCENDER
0x0099: 0x04b7, # CYRILLIC SMALL LETTER CHE WITH DESCENDER
0x009a: 0x04a1, # CYRILLIC SMALL LETTER BASHKIR KA
0x009b: 0x04e3, # CYRILLIC SMALL LETTER I WITH MACRON
0x009c: 0x04a3, # CYRILLIC SMALL LETTER EN WITH DESCENDER
0x009d: 0x049b, # CYRILLIC SMALL LETTER KA WITH DESCENDER
0x009e: 0x04bb, # CYRILLIC SMALL LETTER SHHA
0x009f: 0x04b9, # CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U (Byelorussian)
0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00a4: 0x04e8, # CYRILLIC CAPITAL LETTER BARRED O
0x00a5: 0x0498, # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
0x00a6: 0x04b0, # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00aa: 0x04d8, # CYRILLIC CAPITAL LETTER SCHWA
0x00ad: 0x04ef, # CYRILLIC SMALL LETTER U WITH MACRON
0x00af: 0x049c, # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
0x00b1: 0x04b1, # CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b4: 0x0499, # CYRILLIC SMALL LETTER ZE WITH DESCENDER
0x00b5: 0x04e9, # CYRILLIC SMALL LETTER BARRED O
0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO
0x00b9: 0x2116, # NUMERO SIGN
0x00ba: 0x04d9, # CYRILLIC SMALL LETTER SCHWA
0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE
0x00bd: 0x04aa, # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
0x00be: 0x04ab, # CYRILLIC SMALL LETTER ES WITH DESCENDER
0x00bf: 0x049d, # CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e0: 0x0430, # CYRILLIC SMALL LETTER A
0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00e8: 0x0438, # CYRILLIC SMALL LETTER I
0x00e9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA
0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ee: 0x043e, # CYRILLIC SMALL LETTER O
0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE
0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00f3: 0x0443, # CYRILLIC SMALL LETTER U
0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00fd: 0x044d, # CYRILLIC SMALL LETTER E
0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| apache-2.0 |
rudhir-upretee/SUMO_Src | tools/net/xmledges_applyOffset.py | 3 | 2113 | #!/usr/bin/env python
"""
@file xmledges_applyOffset.py
@author Daniel Krajzewicz
@date 2009-08-01
@version $Id: xmledges_applyOffset.py 11671 2012-01-07 20:14:30Z behrisch $
Applies a given offset to edges given in an xml-edge-file.
The results are written into <XMLEDGES>.mod.xml.
Call: xmledges_applyOffset.py <XMLEDGES> <X-OFFSET> <Y-OFFSET>
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2009-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, string, sys, StringIO
from xml.sax import saxutils, make_parser, handler
class XMLNodesReader(handler.ContentHandler):
    """SAX handler that copies an XML document to a new file while shifting
    every coordinate found in "shape" attributes by a fixed (x, y) offset.

    All other content (elements, attributes, text, PIs) is written through
    unchanged, with standard XML escaping applied where needed.
    """
    def __init__(self, outFileName, xoff, yoff):
        self._out = open(outFileName, 'w')
        self._xoff = xoff
        self._yoff = yoff

    def endDocument(self):
        self._out.close()

    def _shiftShape(self, shape):
        # Apply the offset to every "x,y" pair of a space-separated shape.
        shifted = []
        for coord in shape.split(" "):
            x, y = coord.split(",")
            shifted.append("%s,%s" % (float(x) + self._xoff, float(y) + self._yoff))
        return " ".join(shifted)

    def startElement(self, name, attrs):
        # Buffer the whole start tag, then emit it in one write.
        pieces = ['<' + name]
        for key, value in attrs.items():
            if key == "shape":
                value = self._shiftShape(value)
            pieces.append(' %s="%s"' % (key, saxutils.escape(value)))
        pieces.append('>')
        self._out.write("".join(pieces))

    def endElement(self, name):
        self._out.write('</%s>' % name)

    def characters(self, content):
        self._out.write(saxutils.escape(content))

    def ignorableWhitespace(self, content):
        self._out.write(content)

    def processingInstruction(self, target, data):
        self._out.write('<?%s %s?>' % (target, data))
# Command-line entry point (Python 2 syntax): expects an edge file plus the
# x/y offsets to apply to every "shape" attribute.
if len(sys.argv) < 4:
    print "Usage: " + sys.argv[0] + " <XMLEDGES> <X-OFFSET> <Y-OFFSET>"
    sys.exit()
parser = make_parser()
# Output is written next to the input as "<XMLEDGES>.mod.xml".
reader = XMLNodesReader(sys.argv[1]+".mod.xml", float(sys.argv[2]), float(sys.argv[3]))
parser.setContentHandler(reader)
parser.parse(sys.argv[1])
| gpl-3.0 |
garg10may/youtube-dl | youtube_dl/extractor/videofyme.py | 126 | 1756 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
int_or_none,
)
class VideofyMeIE(InfoExtractor):
    """Extractor for videofy.me video pages and p.videofy.me player URLs."""
    _VALID_URL = r'https?://(?:www\.videofy\.me/.+?|p\.videofy\.me/v)/(?P<id>\d+)(&|#|$)'
    IE_NAME = 'videofy.me'

    _TEST = {
        'url': 'http://www.videofy.me/thisisvideofyme/1100701',
        'md5': 'c77d700bdc16ae2e9f3c26019bd96143',
        'info_dict': {
            'id': '1100701',
            'ext': 'mp4',
            'title': 'This is VideofyMe',
            'description': None,
            'uploader': 'VideofyMe',
            'uploader_id': 'thisisvideofyme',
            'view_count': int,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Video metadata is served as XML by the site's "sunshine" endpoint.
        config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id,
                                    video_id)
        video = config.find('video')
        sources = video.find('sources')
        # Take the first non-missing <source> whose id is "HQ on"/"HQ av"/
        # "HQ off", in that order of preference.
        # NOTE(review): the semantics of the on/av/off quality keys are not
        # evident from this code -- presumably defined by the site's feed.
        url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
                                          for key in ['on', 'av', 'off']] if node is not None)
        video_url = url_node.find('url').text
        # View count is parsed leniently; missing/unparseable values yield None.
        view_count = int_or_none(self._search_regex(
            r'([0-9]+)', video.find('views').text, 'view count', fatal=False))
        return {
            'id': video_id,
            'title': video.find('title').text,
            'url': video_url,
            'thumbnail': video.find('thumb').text,
            'description': video.find('description').text,
            'uploader': config.find('blog/name').text,
            'uploader_id': video.find('identifier').text,
            'view_count': view_count,
        }
| unlicense |
lbryio/lbry | lbry/lbry/schema/types/v2/claim_pb2.py | 1 | 134987 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: claim.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='claim.proto',
package='pb',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0b\x63laim.proto\x12\x02pb\"\xab\x02\n\x05\x43laim\x12\x1c\n\x06stream\x18\x01 \x01(\x0b\x32\n.pb.StreamH\x00\x12\x1e\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\x0b.pb.ChannelH\x00\x12#\n\ncollection\x18\x03 \x01(\x0b\x32\r.pb.ClaimListH\x00\x12$\n\x06repost\x18\x04 \x01(\x0b\x32\x12.pb.ClaimReferenceH\x00\x12\r\n\x05title\x18\x08 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\t \x01(\t\x12\x1d\n\tthumbnail\x18\n \x01(\x0b\x32\n.pb.Source\x12\x0c\n\x04tags\x18\x0b \x03(\t\x12\x1f\n\tlanguages\x18\x0c \x03(\x0b\x32\x0c.pb.Language\x12\x1f\n\tlocations\x18\r \x03(\x0b\x32\x0c.pb.LocationB\x06\n\x04type\"\x84\x02\n\x06Stream\x12\x1a\n\x06source\x18\x01 \x01(\x0b\x32\n.pb.Source\x12\x0e\n\x06\x61uthor\x18\x02 \x01(\t\x12\x0f\n\x07license\x18\x03 \x01(\t\x12\x13\n\x0blicense_url\x18\x04 \x01(\t\x12\x14\n\x0crelease_time\x18\x05 \x01(\x03\x12\x14\n\x03\x66\x65\x65\x18\x06 \x01(\x0b\x32\x07.pb.Fee\x12\x1a\n\x05image\x18\n \x01(\x0b\x32\t.pb.ImageH\x00\x12\x1a\n\x05video\x18\x0b \x01(\x0b\x32\t.pb.VideoH\x00\x12\x1a\n\x05\x61udio\x18\x0c \x01(\x0b\x32\t.pb.AudioH\x00\x12 \n\x08software\x18\r \x01(\x0b\x32\x0c.pb.SoftwareH\x00\x42\x06\n\x04type\"}\n\x07\x43hannel\x12\x12\n\npublic_key\x18\x01 \x01(\x0c\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x13\n\x0bwebsite_url\x18\x03 \x01(\t\x12\x19\n\x05\x63over\x18\x04 \x01(\x0b\x32\n.pb.Source\x12\x1f\n\x08\x66\x65\x61tured\x18\x05 \x01(\x0b\x32\r.pb.ClaimList\"$\n\x0e\x43laimReference\x12\x12\n\nclaim_hash\x18\x01 \x01(\x0c\"\x90\x01\n\tClaimList\x12)\n\tlist_type\x18\x01 \x01(\x0e\x32\x16.pb.ClaimList.ListType\x12,\n\x10\x63laim_references\x18\x02 \x03(\x0b\x32\x12.pb.ClaimReference\"*\n\x08ListType\x12\x0e\n\nCOLLECTION\x10\x00\x12\x0e\n\nDERIVATION\x10\x02\"d\n\x06Source\x12\x0c\n\x04hash\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04size\x18\x03 \x01(\x04\x12\x12\n\nmedia_type\x18\x04 \x01(\t\x12\x0b\n\x03url\x18\x05 \x01(\t\x12\x0f\n\x07sd_hash\x18\x06 
\x01(\x0c\"\x87\x01\n\x03\x46\x65\x65\x12\"\n\x08\x63urrency\x18\x01 \x01(\x0e\x32\x10.pb.Fee.Currency\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0c\x12\x0e\n\x06\x61mount\x18\x03 \x01(\x04\";\n\x08\x43urrency\x12\x14\n\x10UNKNOWN_CURRENCY\x10\x00\x12\x07\n\x03LBC\x10\x01\x12\x07\n\x03\x42TC\x10\x02\x12\x07\n\x03USD\x10\x03\"&\n\x05Image\x12\r\n\x05width\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\r\"R\n\x05Video\x12\r\n\x05width\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\r\x12\x10\n\x08\x64uration\x18\x03 \x01(\r\x12\x18\n\x05\x61udio\x18\x0f \x01(\x0b\x32\t.pb.Audio\"\x19\n\x05\x41udio\x12\x10\n\x08\x64uration\x18\x01 \x01(\r\"l\n\x08Software\x12\n\n\x02os\x18\x01 \x01(\t\"T\n\x02OS\x12\x0e\n\nUNKNOWN_OS\x10\x00\x12\x07\n\x03\x41NY\x10\x01\x12\t\n\x05LINUX\x10\x02\x12\x0b\n\x07WINDOWS\x10\x03\x12\x07\n\x03MAC\x10\x04\x12\x0b\n\x07\x41NDROID\x10\x05\x12\x07\n\x03IOS\x10\x06\"\xc7\x1d\n\x08Language\x12\'\n\x08language\x18\x01 \x01(\x0e\x32\x15.pb.Language.Language\x12#\n\x06script\x18\x02 \x01(\x0e\x32\x13.pb.Language.Script\x12$\n\x06region\x18\x03 
\x01(\x0e\x32\x14.pb.Location.Country\"\x99\x0c\n\x08Language\x12\x14\n\x10UNKNOWN_LANGUAGE\x10\x00\x12\x06\n\x02\x65n\x10\x01\x12\x06\n\x02\x61\x61\x10\x02\x12\x06\n\x02\x61\x62\x10\x03\x12\x06\n\x02\x61\x65\x10\x04\x12\x06\n\x02\x61\x66\x10\x05\x12\x06\n\x02\x61k\x10\x06\x12\x06\n\x02\x61m\x10\x07\x12\x06\n\x02\x61n\x10\x08\x12\x06\n\x02\x61r\x10\t\x12\x06\n\x02\x61s\x10\n\x12\x06\n\x02\x61v\x10\x0b\x12\x06\n\x02\x61y\x10\x0c\x12\x06\n\x02\x61z\x10\r\x12\x06\n\x02\x62\x61\x10\x0e\x12\x06\n\x02\x62\x65\x10\x0f\x12\x06\n\x02\x62g\x10\x10\x12\x06\n\x02\x62h\x10\x11\x12\x06\n\x02\x62i\x10\x12\x12\x06\n\x02\x62m\x10\x13\x12\x06\n\x02\x62n\x10\x14\x12\x06\n\x02\x62o\x10\x15\x12\x06\n\x02\x62r\x10\x16\x12\x06\n\x02\x62s\x10\x17\x12\x06\n\x02\x63\x61\x10\x18\x12\x06\n\x02\x63\x65\x10\x19\x12\x06\n\x02\x63h\x10\x1a\x12\x06\n\x02\x63o\x10\x1b\x12\x06\n\x02\x63r\x10\x1c\x12\x06\n\x02\x63s\x10\x1d\x12\x06\n\x02\x63u\x10\x1e\x12\x06\n\x02\x63v\x10\x1f\x12\x06\n\x02\x63y\x10 \x12\x06\n\x02\x64\x61\x10!\x12\x06\n\x02\x64\x65\x10\"\x12\x06\n\x02\x64v\x10#\x12\x06\n\x02\x64z\x10$\x12\x06\n\x02\x65\x65\x10%\x12\x06\n\x02\x65l\x10&\x12\x06\n\x02\x65o\x10\'\x12\x06\n\x02\x65s\x10(\x12\x06\n\x02\x65t\x10)\x12\x06\n\x02\x65u\x10*\x12\x06\n\x02\x66\x61\x10+\x12\x06\n\x02\x66\x66\x10,\x12\x06\n\x02\x66i\x10-\x12\x06\n\x02\x66j\x10.\x12\x06\n\x02\x66o\x10/\x12\x06\n\x02\x66r\x10\x30\x12\x06\n\x02\x66y\x10\x31\x12\x06\n\x02ga\x10\x32\x12\x06\n\x02gd\x10\x33\x12\x06\n\x02gl\x10\x34\x12\x06\n\x02gn\x10\x35\x12\x06\n\x02gu\x10\x36\x12\x06\n\x02gv\x10\x37\x12\x06\n\x02ha\x10\x38\x12\x06\n\x02he\x10\x39\x12\x06\n\x02hi\x10:\x12\x06\n\x02ho\x10;\x12\x06\n\x02hr\x10<\x12\x06\n\x02ht\x10=\x12\x06\n\x02hu\x10>\x12\x06\n\x02hy\x10?\x12\x06\n\x02hz\x10@\x12\x06\n\x02ia\x10\x41\x12\x06\n\x02id\x10\x42\x12\x06\n\x02ie\x10\x43\x12\x06\n\x02ig\x10\x44\x12\x06\n\x02ii\x10\x45\x12\x06\n\x02ik\x10\x46\x12\x06\n\x02io\x10G\x12\x06\n\x02is\x10H\x12\x06\n\x02it\x10I\x12\x06\n\x02iu\x10J\x12\x06\n\x02ja\x10K\x1
2\x06\n\x02jv\x10L\x12\x06\n\x02ka\x10M\x12\x06\n\x02kg\x10N\x12\x06\n\x02ki\x10O\x12\x06\n\x02kj\x10P\x12\x06\n\x02kk\x10Q\x12\x06\n\x02kl\x10R\x12\x06\n\x02km\x10S\x12\x06\n\x02kn\x10T\x12\x06\n\x02ko\x10U\x12\x06\n\x02kr\x10V\x12\x06\n\x02ks\x10W\x12\x06\n\x02ku\x10X\x12\x06\n\x02kv\x10Y\x12\x06\n\x02kw\x10Z\x12\x06\n\x02ky\x10[\x12\x06\n\x02la\x10\\\x12\x06\n\x02lb\x10]\x12\x06\n\x02lg\x10^\x12\x06\n\x02li\x10_\x12\x06\n\x02ln\x10`\x12\x06\n\x02lo\x10\x61\x12\x06\n\x02lt\x10\x62\x12\x06\n\x02lu\x10\x63\x12\x06\n\x02lv\x10\x64\x12\x06\n\x02mg\x10\x65\x12\x06\n\x02mh\x10\x66\x12\x06\n\x02mi\x10g\x12\x06\n\x02mk\x10h\x12\x06\n\x02ml\x10i\x12\x06\n\x02mn\x10j\x12\x06\n\x02mr\x10k\x12\x06\n\x02ms\x10l\x12\x06\n\x02mt\x10m\x12\x06\n\x02my\x10n\x12\x06\n\x02na\x10o\x12\x06\n\x02nb\x10p\x12\x06\n\x02nd\x10q\x12\x06\n\x02ne\x10r\x12\x06\n\x02ng\x10s\x12\x06\n\x02nl\x10t\x12\x06\n\x02nn\x10u\x12\x06\n\x02no\x10v\x12\x06\n\x02nr\x10w\x12\x06\n\x02nv\x10x\x12\x06\n\x02ny\x10y\x12\x06\n\x02oc\x10z\x12\x06\n\x02oj\x10{\x12\x06\n\x02om\x10|\x12\x06\n\x02or\x10}\x12\x06\n\x02os\x10~\x12\x06\n\x02pa\x10\x7f\x12\x07\n\x02pi\x10\x80\x01\x12\x07\n\x02pl\x10\x81\x01\x12\x07\n\x02ps\x10\x82\x01\x12\x07\n\x02pt\x10\x83\x01\x12\x07\n\x02qu\x10\x84\x01\x12\x07\n\x02rm\x10\x85\x01\x12\x07\n\x02rn\x10\x86\x01\x12\x07\n\x02ro\x10\x87\x01\x12\x07\n\x02ru\x10\x88\x01\x12\x07\n\x02rw\x10\x89\x01\x12\x07\n\x02sa\x10\x8a\x01\x12\x07\n\x02sc\x10\x8b\x01\x12\x07\n\x02sd\x10\x8c\x01\x12\x07\n\x02se\x10\x8d\x01\x12\x07\n\x02sg\x10\x8e\x01\x12\x07\n\x02si\x10\x8f\x01\x12\x07\n\x02sk\x10\x90\x01\x12\x07\n\x02sl\x10\x91\x01\x12\x07\n\x02sm\x10\x92\x01\x12\x07\n\x02sn\x10\x93\x01\x12\x07\n\x02so\x10\x94\x01\x12\x07\n\x02sq\x10\x95\x01\x12\x07\n\x02sr\x10\x96\x01\x12\x07\n\x02ss\x10\x97\x01\x12\x07\n\x02st\x10\x98\x01\x12\x07\n\x02su\x10\x99\x01\x12\x07\n\x02sv\x10\x9a\x01\x12\x07\n\x02sw\x10\x9b\x01\x12\x07\n\x02ta\x10\x9c\x01\x12\x07\n\x02te\x10\x9d\x01\x12\x07\n\x02tg\x10\x9e\x01\x12\x07\n\x02th\x10\
x9f\x01\x12\x07\n\x02ti\x10\xa0\x01\x12\x07\n\x02tk\x10\xa1\x01\x12\x07\n\x02tl\x10\xa2\x01\x12\x07\n\x02tn\x10\xa3\x01\x12\x07\n\x02to\x10\xa4\x01\x12\x07\n\x02tr\x10\xa5\x01\x12\x07\n\x02ts\x10\xa6\x01\x12\x07\n\x02tt\x10\xa7\x01\x12\x07\n\x02tw\x10\xa8\x01\x12\x07\n\x02ty\x10\xa9\x01\x12\x07\n\x02ug\x10\xaa\x01\x12\x07\n\x02uk\x10\xab\x01\x12\x07\n\x02ur\x10\xac\x01\x12\x07\n\x02uz\x10\xad\x01\x12\x07\n\x02ve\x10\xae\x01\x12\x07\n\x02vi\x10\xaf\x01\x12\x07\n\x02vo\x10\xb0\x01\x12\x07\n\x02wa\x10\xb1\x01\x12\x07\n\x02wo\x10\xb2\x01\x12\x07\n\x02xh\x10\xb3\x01\x12\x07\n\x02yi\x10\xb4\x01\x12\x07\n\x02yo\x10\xb5\x01\x12\x07\n\x02za\x10\xb6\x01\x12\x07\n\x02zh\x10\xb7\x01\x12\x07\n\x02zu\x10\xb8\x01\"\xaa\x10\n\x06Script\x12\x12\n\x0eUNKNOWN_SCRIPT\x10\x00\x12\x08\n\x04\x41\x64lm\x10\x01\x12\x08\n\x04\x41\x66\x61k\x10\x02\x12\x08\n\x04\x41ghb\x10\x03\x12\x08\n\x04\x41hom\x10\x04\x12\x08\n\x04\x41rab\x10\x05\x12\x08\n\x04\x41ran\x10\x06\x12\x08\n\x04\x41rmi\x10\x07\x12\x08\n\x04\x41rmn\x10\x08\x12\x08\n\x04\x41vst\x10\t\x12\x08\n\x04\x42\x61li\x10\n\x12\x08\n\x04\x42\x61mu\x10\x0b\x12\x08\n\x04\x42\x61ss\x10\x0c\x12\x08\n\x04\x42\x61tk\x10\r\x12\x08\n\x04\x42\x65ng\x10\x0e\x12\x08\n\x04\x42hks\x10\x0f\x12\x08\n\x04\x42lis\x10\x10\x12\x08\n\x04\x42opo\x10\x11\x12\x08\n\x04\x42rah\x10\x12\x12\x08\n\x04\x42rai\x10\x13\x12\x08\n\x04\x42ugi\x10\x14\x12\x08\n\x04\x42uhd\x10\x15\x12\x08\n\x04\x43\x61km\x10\x16\x12\x08\n\x04\x43\x61ns\x10\x17\x12\x08\n\x04\x43\x61ri\x10\x18\x12\x08\n\x04\x43ham\x10\x19\x12\x08\n\x04\x43her\x10\x1a\x12\x08\n\x04\x43irt\x10\x1b\x12\x08\n\x04\x43opt\x10\x1c\x12\x08\n\x04\x43pmn\x10\x1d\x12\x08\n\x04\x43prt\x10\x1e\x12\x08\n\x04\x43yrl\x10\x1f\x12\x08\n\x04\x43yrs\x10 
\x12\x08\n\x04\x44\x65va\x10!\x12\x08\n\x04\x44ogr\x10\"\x12\x08\n\x04\x44srt\x10#\x12\x08\n\x04\x44upl\x10$\x12\x08\n\x04\x45gyd\x10%\x12\x08\n\x04\x45gyh\x10&\x12\x08\n\x04\x45gyp\x10\'\x12\x08\n\x04\x45lba\x10(\x12\x08\n\x04\x45lym\x10)\x12\x08\n\x04\x45thi\x10*\x12\x08\n\x04Geok\x10+\x12\x08\n\x04Geor\x10,\x12\x08\n\x04Glag\x10-\x12\x08\n\x04Gong\x10.\x12\x08\n\x04Gonm\x10/\x12\x08\n\x04Goth\x10\x30\x12\x08\n\x04Gran\x10\x31\x12\x08\n\x04Grek\x10\x32\x12\x08\n\x04Gujr\x10\x33\x12\x08\n\x04Guru\x10\x34\x12\x08\n\x04Hanb\x10\x35\x12\x08\n\x04Hang\x10\x36\x12\x08\n\x04Hani\x10\x37\x12\x08\n\x04Hano\x10\x38\x12\x08\n\x04Hans\x10\x39\x12\x08\n\x04Hant\x10:\x12\x08\n\x04Hatr\x10;\x12\x08\n\x04Hebr\x10<\x12\x08\n\x04Hira\x10=\x12\x08\n\x04Hluw\x10>\x12\x08\n\x04Hmng\x10?\x12\x08\n\x04Hmnp\x10@\x12\x08\n\x04Hrkt\x10\x41\x12\x08\n\x04Hung\x10\x42\x12\x08\n\x04Inds\x10\x43\x12\x08\n\x04Ital\x10\x44\x12\x08\n\x04Jamo\x10\x45\x12\x08\n\x04Java\x10\x46\x12\x08\n\x04Jpan\x10G\x12\x08\n\x04Jurc\x10H\x12\x08\n\x04Kali\x10I\x12\x08\n\x04Kana\x10J\x12\x08\n\x04Khar\x10K\x12\x08\n\x04Khmr\x10L\x12\x08\n\x04Khoj\x10M\x12\x08\n\x04Kitl\x10N\x12\x08\n\x04Kits\x10O\x12\x08\n\x04Knda\x10P\x12\x08\n\x04Kore\x10Q\x12\x08\n\x04Kpel\x10R\x12\x08\n\x04Kthi\x10S\x12\x08\n\x04Lana\x10T\x12\x08\n\x04Laoo\x10U\x12\x08\n\x04Latf\x10V\x12\x08\n\x04Latg\x10W\x12\x08\n\x04Latn\x10X\x12\x08\n\x04Leke\x10Y\x12\x08\n\x04Lepc\x10Z\x12\x08\n\x04Limb\x10[\x12\x08\n\x04Lina\x10\\\x12\x08\n\x04Linb\x10]\x12\x08\n\x04Lisu\x10^\x12\x08\n\x04Loma\x10_\x12\x08\n\x04Lyci\x10`\x12\x08\n\x04Lydi\x10\x61\x12\x08\n\x04Mahj\x10\x62\x12\x08\n\x04Maka\x10\x63\x12\x08\n\x04Mand\x10\x64\x12\x08\n\x04Mani\x10\x65\x12\x08\n\x04Marc\x10\x66\x12\x08\n\x04Maya\x10g\x12\x08\n\x04Medf\x10h\x12\x08\n\x04Mend\x10i\x12\x08\n\x04Merc\x10j\x12\x08\n\x04Mero\x10k\x12\x08\n\x04Mlym\x10l\x12\x08\n\x04Modi\x10m\x12\x08\n\x04Mong\x10n\x12\x08\n\x04Moon\x10o\x12\x08\n\x04Mroo\x10p\x12\x08\n\x04Mtei\x10q\x12\x08\n\x04Mult\x10r\x12\x08\n\x
04Mymr\x10s\x12\x08\n\x04Nand\x10t\x12\x08\n\x04Narb\x10u\x12\x08\n\x04Nbat\x10v\x12\x08\n\x04Newa\x10w\x12\x08\n\x04Nkdb\x10x\x12\x08\n\x04Nkgb\x10y\x12\x08\n\x04Nkoo\x10z\x12\x08\n\x04Nshu\x10{\x12\x08\n\x04Ogam\x10|\x12\x08\n\x04Olck\x10}\x12\x08\n\x04Orkh\x10~\x12\x08\n\x04Orya\x10\x7f\x12\t\n\x04Osge\x10\x80\x01\x12\t\n\x04Osma\x10\x81\x01\x12\t\n\x04Palm\x10\x82\x01\x12\t\n\x04Pauc\x10\x83\x01\x12\t\n\x04Perm\x10\x84\x01\x12\t\n\x04Phag\x10\x85\x01\x12\t\n\x04Phli\x10\x86\x01\x12\t\n\x04Phlp\x10\x87\x01\x12\t\n\x04Phlv\x10\x88\x01\x12\t\n\x04Phnx\x10\x89\x01\x12\t\n\x04Plrd\x10\x8a\x01\x12\t\n\x04Piqd\x10\x8b\x01\x12\t\n\x04Prti\x10\x8c\x01\x12\t\n\x04Qaaa\x10\x8d\x01\x12\t\n\x04Qabx\x10\x8e\x01\x12\t\n\x04Rjng\x10\x8f\x01\x12\t\n\x04Rohg\x10\x90\x01\x12\t\n\x04Roro\x10\x91\x01\x12\t\n\x04Runr\x10\x92\x01\x12\t\n\x04Samr\x10\x93\x01\x12\t\n\x04Sara\x10\x94\x01\x12\t\n\x04Sarb\x10\x95\x01\x12\t\n\x04Saur\x10\x96\x01\x12\t\n\x04Sgnw\x10\x97\x01\x12\t\n\x04Shaw\x10\x98\x01\x12\t\n\x04Shrd\x10\x99\x01\x12\t\n\x04Shui\x10\x9a\x01\x12\t\n\x04Sidd\x10\x9b\x01\x12\t\n\x04Sind\x10\x9c\x01\x12\t\n\x04Sinh\x10\x9d\x01\x12\t\n\x04Sogd\x10\x9e\x01\x12\t\n\x04Sogo\x10\x9f\x01\x12\t\n\x04Sora\x10\xa0\x01\x12\t\n\x04Soyo\x10\xa1\x01\x12\t\n\x04Sund\x10\xa2\x01\x12\t\n\x04Sylo\x10\xa3\x01\x12\t\n\x04Syrc\x10\xa4\x01\x12\t\n\x04Syre\x10\xa5\x01\x12\t\n\x04Syrj\x10\xa6\x01\x12\t\n\x04Syrn\x10\xa7\x01\x12\t\n\x04Tagb\x10\xa8\x01\x12\t\n\x04Takr\x10\xa9\x01\x12\t\n\x04Tale\x10\xaa\x01\x12\t\n\x04Talu\x10\xab\x01\x12\t\n\x04Taml\x10\xac\x01\x12\t\n\x04Tang\x10\xad\x01\x12\t\n\x04Tavt\x10\xae\x01\x12\t\n\x04Telu\x10\xaf\x01\x12\t\n\x04Teng\x10\xb0\x01\x12\t\n\x04Tfng\x10\xb1\x01\x12\t\n\x04Tglg\x10\xb2\x01\x12\t\n\x04Thaa\x10\xb3\x01\x12\t\n\x04Thai\x10\xb4\x01\x12\t\n\x04Tibt\x10\xb5\x01\x12\t\n\x04Tirh\x10\xb6\x01\x12\t\n\x04Ugar\x10\xb7\x01\x12\t\n\x04Vaii\x10\xb8\x01\x12\t\n\x04Visp\x10\xb9\x01\x12\t\n\x04Wara\x10\xba\x01\x12\t\n\x04Wcho\x10\xbb\x01\x12\t\n\x04Wole\x10\xbc\x01\x
12\t\n\x04Xpeo\x10\xbd\x01\x12\t\n\x04Xsux\x10\xbe\x01\x12\t\n\x04Yiii\x10\xbf\x01\x12\t\n\x04Zanb\x10\xc0\x01\x12\t\n\x04Zinh\x10\xc1\x01\x12\t\n\x04Zmth\x10\xc2\x01\x12\t\n\x04Zsye\x10\xc3\x01\x12\t\n\x04Zsym\x10\xc4\x01\x12\t\n\x04Zxxx\x10\xc5\x01\x12\t\n\x04Zyyy\x10\xc6\x01\x12\t\n\x04Zzzz\x10\xc7\x01\"\xe4\x11\n\x08Location\x12%\n\x07\x63ountry\x18\x01 \x01(\x0e\x32\x14.pb.Location.Country\x12\r\n\x05state\x18\x02 \x01(\t\x12\x0c\n\x04\x63ity\x18\x03 \x01(\t\x12\x0c\n\x04\x63ode\x18\x04 \x01(\t\x12\x10\n\x08latitude\x18\x05 \x01(\x11\x12\x11\n\tlongitude\x18\x06 \x01(\x11\"\xe0\x10\n\x07\x43ountry\x12\x13\n\x0fUNKNOWN_COUNTRY\x10\x00\x12\x06\n\x02\x41\x46\x10\x01\x12\x06\n\x02\x41X\x10\x02\x12\x06\n\x02\x41L\x10\x03\x12\x06\n\x02\x44Z\x10\x04\x12\x06\n\x02\x41S\x10\x05\x12\x06\n\x02\x41\x44\x10\x06\x12\x06\n\x02\x41O\x10\x07\x12\x06\n\x02\x41I\x10\x08\x12\x06\n\x02\x41Q\x10\t\x12\x06\n\x02\x41G\x10\n\x12\x06\n\x02\x41R\x10\x0b\x12\x06\n\x02\x41M\x10\x0c\x12\x06\n\x02\x41W\x10\r\x12\x06\n\x02\x41U\x10\x0e\x12\x06\n\x02\x41T\x10\x0f\x12\x06\n\x02\x41Z\x10\x10\x12\x06\n\x02\x42S\x10\x11\x12\x06\n\x02\x42H\x10\x12\x12\x06\n\x02\x42\x44\x10\x13\x12\x06\n\x02\x42\x42\x10\x14\x12\x06\n\x02\x42Y\x10\x15\x12\x06\n\x02\x42\x45\x10\x16\x12\x06\n\x02\x42Z\x10\x17\x12\x06\n\x02\x42J\x10\x18\x12\x06\n\x02\x42M\x10\x19\x12\x06\n\x02\x42T\x10\x1a\x12\x06\n\x02\x42O\x10\x1b\x12\x06\n\x02\x42Q\x10\x1c\x12\x06\n\x02\x42\x41\x10\x1d\x12\x06\n\x02\x42W\x10\x1e\x12\x06\n\x02\x42V\x10\x1f\x12\x06\n\x02\x42R\x10 
\x12\x06\n\x02IO\x10!\x12\x06\n\x02\x42N\x10\"\x12\x06\n\x02\x42G\x10#\x12\x06\n\x02\x42\x46\x10$\x12\x06\n\x02\x42I\x10%\x12\x06\n\x02KH\x10&\x12\x06\n\x02\x43M\x10\'\x12\x06\n\x02\x43\x41\x10(\x12\x06\n\x02\x43V\x10)\x12\x06\n\x02KY\x10*\x12\x06\n\x02\x43\x46\x10+\x12\x06\n\x02TD\x10,\x12\x06\n\x02\x43L\x10-\x12\x06\n\x02\x43N\x10.\x12\x06\n\x02\x43X\x10/\x12\x06\n\x02\x43\x43\x10\x30\x12\x06\n\x02\x43O\x10\x31\x12\x06\n\x02KM\x10\x32\x12\x06\n\x02\x43G\x10\x33\x12\x06\n\x02\x43\x44\x10\x34\x12\x06\n\x02\x43K\x10\x35\x12\x06\n\x02\x43R\x10\x36\x12\x06\n\x02\x43I\x10\x37\x12\x06\n\x02HR\x10\x38\x12\x06\n\x02\x43U\x10\x39\x12\x06\n\x02\x43W\x10:\x12\x06\n\x02\x43Y\x10;\x12\x06\n\x02\x43Z\x10<\x12\x06\n\x02\x44K\x10=\x12\x06\n\x02\x44J\x10>\x12\x06\n\x02\x44M\x10?\x12\x06\n\x02\x44O\x10@\x12\x06\n\x02\x45\x43\x10\x41\x12\x06\n\x02\x45G\x10\x42\x12\x06\n\x02SV\x10\x43\x12\x06\n\x02GQ\x10\x44\x12\x06\n\x02\x45R\x10\x45\x12\x06\n\x02\x45\x45\x10\x46\x12\x06\n\x02\x45T\x10G\x12\x06\n\x02\x46K\x10H\x12\x06\n\x02\x46O\x10I\x12\x06\n\x02\x46J\x10J\x12\x06\n\x02\x46I\x10K\x12\x06\n\x02\x46R\x10L\x12\x06\n\x02GF\x10M\x12\x06\n\x02PF\x10N\x12\x06\n\x02TF\x10O\x12\x06\n\x02GA\x10P\x12\x06\n\x02GM\x10Q\x12\x06\n\x02GE\x10R\x12\x06\n\x02\x44\x45\x10S\x12\x06\n\x02GH\x10T\x12\x06\n\x02GI\x10U\x12\x06\n\x02GR\x10V\x12\x06\n\x02GL\x10W\x12\x06\n\x02GD\x10X\x12\x06\n\x02GP\x10Y\x12\x06\n\x02GU\x10Z\x12\x06\n\x02GT\x10[\x12\x06\n\x02GG\x10\\\x12\x06\n\x02GN\x10]\x12\x06\n\x02GW\x10^\x12\x06\n\x02GY\x10_\x12\x06\n\x02HT\x10`\x12\x06\n\x02HM\x10\x61\x12\x06\n\x02VA\x10\x62\x12\x06\n\x02HN\x10\x63\x12\x06\n\x02HK\x10\x64\x12\x06\n\x02HU\x10\x65\x12\x06\n\x02IS\x10\x66\x12\x06\n\x02IN\x10g\x12\x06\n\x02ID\x10h\x12\x06\n\x02IR\x10i\x12\x06\n\x02IQ\x10j\x12\x06\n\x02IE\x10k\x12\x06\n\x02IM\x10l\x12\x06\n\x02IL\x10m\x12\x06\n\x02IT\x10n\x12\x06\n\x02JM\x10o\x12\x06\n\x02JP\x10p\x12\x06\n\x02JE\x10q\x12\x06\n\x02JO\x10r\x12\x06\n\x02KZ\x10s\x12\x06\n\x02KE\x10t\x12\x06\n\x02KI\x10u\x12\x06\n\
x02KP\x10v\x12\x06\n\x02KR\x10w\x12\x06\n\x02KW\x10x\x12\x06\n\x02KG\x10y\x12\x06\n\x02LA\x10z\x12\x06\n\x02LV\x10{\x12\x06\n\x02LB\x10|\x12\x06\n\x02LS\x10}\x12\x06\n\x02LR\x10~\x12\x06\n\x02LY\x10\x7f\x12\x07\n\x02LI\x10\x80\x01\x12\x07\n\x02LT\x10\x81\x01\x12\x07\n\x02LU\x10\x82\x01\x12\x07\n\x02MO\x10\x83\x01\x12\x07\n\x02MK\x10\x84\x01\x12\x07\n\x02MG\x10\x85\x01\x12\x07\n\x02MW\x10\x86\x01\x12\x07\n\x02MY\x10\x87\x01\x12\x07\n\x02MV\x10\x88\x01\x12\x07\n\x02ML\x10\x89\x01\x12\x07\n\x02MT\x10\x8a\x01\x12\x07\n\x02MH\x10\x8b\x01\x12\x07\n\x02MQ\x10\x8c\x01\x12\x07\n\x02MR\x10\x8d\x01\x12\x07\n\x02MU\x10\x8e\x01\x12\x07\n\x02YT\x10\x8f\x01\x12\x07\n\x02MX\x10\x90\x01\x12\x07\n\x02\x46M\x10\x91\x01\x12\x07\n\x02MD\x10\x92\x01\x12\x07\n\x02MC\x10\x93\x01\x12\x07\n\x02MN\x10\x94\x01\x12\x07\n\x02ME\x10\x95\x01\x12\x07\n\x02MS\x10\x96\x01\x12\x07\n\x02MA\x10\x97\x01\x12\x07\n\x02MZ\x10\x98\x01\x12\x07\n\x02MM\x10\x99\x01\x12\x07\n\x02NA\x10\x9a\x01\x12\x07\n\x02NR\x10\x9b\x01\x12\x07\n\x02NP\x10\x9c\x01\x12\x07\n\x02NL\x10\x9d\x01\x12\x07\n\x02NC\x10\x9e\x01\x12\x07\n\x02NZ\x10\x9f\x01\x12\x07\n\x02NI\x10\xa0\x01\x12\x07\n\x02NE\x10\xa1\x01\x12\x07\n\x02NG\x10\xa2\x01\x12\x07\n\x02NU\x10\xa3\x01\x12\x07\n\x02NF\x10\xa4\x01\x12\x07\n\x02MP\x10\xa5\x01\x12\x07\n\x02NO\x10\xa6\x01\x12\x07\n\x02OM\x10\xa7\x01\x12\x07\n\x02PK\x10\xa8\x01\x12\x07\n\x02PW\x10\xa9\x01\x12\x07\n\x02PS\x10\xaa\x01\x12\x07\n\x02PA\x10\xab\x01\x12\x07\n\x02PG\x10\xac\x01\x12\x07\n\x02PY\x10\xad\x01\x12\x07\n\x02PE\x10\xae\x01\x12\x07\n\x02PH\x10\xaf\x01\x12\x07\n\x02PN\x10\xb0\x01\x12\x07\n\x02PL\x10\xb1\x01\x12\x07\n\x02PT\x10\xb2\x01\x12\x07\n\x02PR\x10\xb3\x01\x12\x07\n\x02QA\x10\xb4\x01\x12\x07\n\x02RE\x10\xb5\x01\x12\x07\n\x02RO\x10\xb6\x01\x12\x07\n\x02RU\x10\xb7\x01\x12\x07\n\x02RW\x10\xb8\x01\x12\x07\n\x02\x42L\x10\xb9\x01\x12\x07\n\x02SH\x10\xba\x01\x12\x07\n\x02KN\x10\xbb\x01\x12\x07\n\x02LC\x10\xbc\x01\x12\x07\n\x02MF\x10\xbd\x01\x12\x07\n\x02PM\x10\xbe\x01\x12\x07\n\x02VC\x10\xbf\x01
\x12\x07\n\x02WS\x10\xc0\x01\x12\x07\n\x02SM\x10\xc1\x01\x12\x07\n\x02ST\x10\xc2\x01\x12\x07\n\x02SA\x10\xc3\x01\x12\x07\n\x02SN\x10\xc4\x01\x12\x07\n\x02RS\x10\xc5\x01\x12\x07\n\x02SC\x10\xc6\x01\x12\x07\n\x02SL\x10\xc7\x01\x12\x07\n\x02SG\x10\xc8\x01\x12\x07\n\x02SX\x10\xc9\x01\x12\x07\n\x02SK\x10\xca\x01\x12\x07\n\x02SI\x10\xcb\x01\x12\x07\n\x02SB\x10\xcc\x01\x12\x07\n\x02SO\x10\xcd\x01\x12\x07\n\x02ZA\x10\xce\x01\x12\x07\n\x02GS\x10\xcf\x01\x12\x07\n\x02SS\x10\xd0\x01\x12\x07\n\x02\x45S\x10\xd1\x01\x12\x07\n\x02LK\x10\xd2\x01\x12\x07\n\x02SD\x10\xd3\x01\x12\x07\n\x02SR\x10\xd4\x01\x12\x07\n\x02SJ\x10\xd5\x01\x12\x07\n\x02SZ\x10\xd6\x01\x12\x07\n\x02SE\x10\xd7\x01\x12\x07\n\x02\x43H\x10\xd8\x01\x12\x07\n\x02SY\x10\xd9\x01\x12\x07\n\x02TW\x10\xda\x01\x12\x07\n\x02TJ\x10\xdb\x01\x12\x07\n\x02TZ\x10\xdc\x01\x12\x07\n\x02TH\x10\xdd\x01\x12\x07\n\x02TL\x10\xde\x01\x12\x07\n\x02TG\x10\xdf\x01\x12\x07\n\x02TK\x10\xe0\x01\x12\x07\n\x02TO\x10\xe1\x01\x12\x07\n\x02TT\x10\xe2\x01\x12\x07\n\x02TN\x10\xe3\x01\x12\x07\n\x02TR\x10\xe4\x01\x12\x07\n\x02TM\x10\xe5\x01\x12\x07\n\x02TC\x10\xe6\x01\x12\x07\n\x02TV\x10\xe7\x01\x12\x07\n\x02UG\x10\xe8\x01\x12\x07\n\x02UA\x10\xe9\x01\x12\x07\n\x02\x41\x45\x10\xea\x01\x12\x07\n\x02GB\x10\xeb\x01\x12\x07\n\x02US\x10\xec\x01\x12\x07\n\x02UM\x10\xed\x01\x12\x07\n\x02UY\x10\xee\x01\x12\x07\n\x02UZ\x10\xef\x01\x12\x07\n\x02VU\x10\xf0\x01\x12\x07\n\x02VE\x10\xf1\x01\x12\x07\n\x02VN\x10\xf2\x01\x12\x07\n\x02VG\x10\xf3\x01\x12\x07\n\x02VI\x10\xf4\x01\x12\x07\n\x02WF\x10\xf5\x01\x12\x07\n\x02\x45H\x10\xf6\x01\x12\x07\n\x02YE\x10\xf7\x01\x12\x07\n\x02ZM\x10\xf8\x01\x12\x07\n\x02ZW\x10\xf9\x01\x62\x06proto3')
)
# NOTE(review): generated protobuf code ("DO NOT EDIT" per the file header);
# the comments here are review annotations only.
# Descriptor for the nested enum pb.ClaimList.ListType.  Wire values:
# COLLECTION = 0, DERIVATION = 2 — value 1 is unassigned in claim.proto.
_CLAIMLIST_LISTTYPE = _descriptor.EnumDescriptor(
  name='ListType',
  full_name='pb.ClaimList.ListType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='COLLECTION', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DERIVATION', index=1, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  # Byte offsets of this enum's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=852,
  serialized_end=894,
)
_sym_db.RegisterEnumDescriptor(_CLAIMLIST_LISTTYPE)
# NOTE(review): generated protobuf code ("DO NOT EDIT" per the file header);
# the comments here are review annotations only.
# Descriptor for the nested enum pb.Fee.Currency.  Wire values:
# UNKNOWN_CURRENCY = 0, LBC = 1, BTC = 2, USD = 3.
_FEE_CURRENCY = _descriptor.EnumDescriptor(
  name='Currency',
  full_name='pb.Fee.Currency',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN_CURRENCY', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LBC', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='BTC', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='USD', index=3, number=3,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  # Byte offsets of this enum's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1075,
  serialized_end=1134,
)
_sym_db.RegisterEnumDescriptor(_FEE_CURRENCY)
# NOTE(review): generated protobuf code ("DO NOT EDIT" per the file header);
# the comments here are review annotations only.
# Descriptor for the nested enum pb.Software.OS.  Wire values 0..6:
# UNKNOWN_OS, ANY, LINUX, WINDOWS, MAC, ANDROID, IOS.
_SOFTWARE_OS = _descriptor.EnumDescriptor(
  name='OS',
  full_name='pb.Software.OS',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN_OS', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ANY', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LINUX', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='WINDOWS', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MAC', index=4, number=4,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ANDROID', index=5, number=5,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='IOS', index=6, number=6,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  # Byte offsets of this enum's definition inside DESCRIPTOR.serialized_pb.
  serialized_start=1311,
  serialized_end=1395,
)
_sym_db.RegisterEnumDescriptor(_SOFTWARE_OS)
_LANGUAGE_LANGUAGE = _descriptor.EnumDescriptor(
name='Language',
full_name='pb.Language.Language',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_LANGUAGE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='en', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='aa', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ab', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ae', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='af', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ak', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='am', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='an', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ar', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='as', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='av', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ay', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='az', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ba', index=14, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='be', index=15, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='bg', index=16, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='bh', index=17, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='bi', index=18, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='bm', index=19, number=19,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='bn', index=20, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='bo', index=21, number=21,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='br', index=22, number=22,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='bs', index=23, number=23,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ca', index=24, number=24,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ce', index=25, number=25,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ch', index=26, number=26,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='co', index=27, number=27,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='cr', index=28, number=28,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='cs', index=29, number=29,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='cu', index=30, number=30,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='cv', index=31, number=31,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='cy', index=32, number=32,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='da', index=33, number=33,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='de', index=34, number=34,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='dv', index=35, number=35,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='dz', index=36, number=36,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ee', index=37, number=37,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='el', index=38, number=38,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='eo', index=39, number=39,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='es', index=40, number=40,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='et', index=41, number=41,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='eu', index=42, number=42,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='fa', index=43, number=43,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ff', index=44, number=44,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='fi', index=45, number=45,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='fj', index=46, number=46,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='fo', index=47, number=47,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='fr', index=48, number=48,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='fy', index=49, number=49,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ga', index=50, number=50,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='gd', index=51, number=51,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='gl', index=52, number=52,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='gn', index=53, number=53,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='gu', index=54, number=54,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='gv', index=55, number=55,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ha', index=56, number=56,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='he', index=57, number=57,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='hi', index=58, number=58,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ho', index=59, number=59,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='hr', index=60, number=60,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ht', index=61, number=61,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='hu', index=62, number=62,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='hy', index=63, number=63,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='hz', index=64, number=64,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ia', index=65, number=65,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='id', index=66, number=66,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ie', index=67, number=67,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ig', index=68, number=68,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ii', index=69, number=69,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ik', index=70, number=70,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='io', index=71, number=71,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='is', index=72, number=72,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='it', index=73, number=73,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='iu', index=74, number=74,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ja', index=75, number=75,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='jv', index=76, number=76,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ka', index=77, number=77,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='kg', index=78, number=78,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ki', index=79, number=79,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='kj', index=80, number=80,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='kk', index=81, number=81,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='kl', index=82, number=82,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='km', index=83, number=83,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='kn', index=84, number=84,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ko', index=85, number=85,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='kr', index=86, number=86,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ks', index=87, number=87,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ku', index=88, number=88,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='kv', index=89, number=89,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='kw', index=90, number=90,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ky', index=91, number=91,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='la', index=92, number=92,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='lb', index=93, number=93,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='lg', index=94, number=94,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='li', index=95, number=95,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ln', index=96, number=96,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='lo', index=97, number=97,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='lt', index=98, number=98,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='lu', index=99, number=99,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='lv', index=100, number=100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='mg', index=101, number=101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='mh', index=102, number=102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='mi', index=103, number=103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='mk', index=104, number=104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ml', index=105, number=105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='mn', index=106, number=106,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='mr', index=107, number=107,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ms', index=108, number=108,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='mt', index=109, number=109,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='my', index=110, number=110,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='na', index=111, number=111,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='nb', index=112, number=112,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='nd', index=113, number=113,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ne', index=114, number=114,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ng', index=115, number=115,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='nl', index=116, number=116,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='nn', index=117, number=117,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='no', index=118, number=118,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='nr', index=119, number=119,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='nv', index=120, number=120,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ny', index=121, number=121,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='oc', index=122, number=122,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='oj', index=123, number=123,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='om', index=124, number=124,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='or', index=125, number=125,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='os', index=126, number=126,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='pa', index=127, number=127,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='pi', index=128, number=128,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='pl', index=129, number=129,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ps', index=130, number=130,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='pt', index=131, number=131,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='qu', index=132, number=132,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='rm', index=133, number=133,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='rn', index=134, number=134,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ro', index=135, number=135,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ru', index=136, number=136,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='rw', index=137, number=137,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sa', index=138, number=138,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sc', index=139, number=139,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sd', index=140, number=140,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='se', index=141, number=141,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sg', index=142, number=142,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='si', index=143, number=143,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sk', index=144, number=144,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sl', index=145, number=145,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sm', index=146, number=146,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sn', index=147, number=147,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='so', index=148, number=148,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sq', index=149, number=149,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sr', index=150, number=150,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ss', index=151, number=151,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='st', index=152, number=152,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='su', index=153, number=153,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sv', index=154, number=154,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='sw', index=155, number=155,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ta', index=156, number=156,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='te', index=157, number=157,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tg', index=158, number=158,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='th', index=159, number=159,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ti', index=160, number=160,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tk', index=161, number=161,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tl', index=162, number=162,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tn', index=163, number=163,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='to', index=164, number=164,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tr', index=165, number=165,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ts', index=166, number=166,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tt', index=167, number=167,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='tw', index=168, number=168,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ty', index=169, number=169,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ug', index=170, number=170,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='uk', index=171, number=171,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ur', index=172, number=172,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='uz', index=173, number=173,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ve', index=174, number=174,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='vi', index=175, number=175,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='vo', index=176, number=176,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='wa', index=177, number=177,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='wo', index=178, number=178,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='xh', index=179, number=179,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='yi', index=180, number=180,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='yo', index=181, number=181,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='za', index=182, number=182,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='zh', index=183, number=183,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='zu', index=184, number=184,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1527,
serialized_end=3088,
)
_sym_db.RegisterEnumDescriptor(_LANGUAGE_LANGUAGE)
_LANGUAGE_SCRIPT = _descriptor.EnumDescriptor(
name='Script',
full_name='pb.Language.Script',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_SCRIPT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Adlm', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Afak', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Aghb', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Ahom', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Arab', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Aran', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Armi', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Armn', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Avst', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Bali', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Bamu', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Bass', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Batk', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Beng', index=14, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Bhks', index=15, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Blis', index=16, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Bopo', index=17, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Brah', index=18, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Brai', index=19, number=19,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Bugi', index=20, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Buhd', index=21, number=21,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cakm', index=22, number=22,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cans', index=23, number=23,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cari', index=24, number=24,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cham', index=25, number=25,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cher', index=26, number=26,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cirt', index=27, number=27,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Copt', index=28, number=28,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cpmn', index=29, number=29,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cprt', index=30, number=30,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cyrl', index=31, number=31,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Cyrs', index=32, number=32,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Deva', index=33, number=33,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Dogr', index=34, number=34,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Dsrt', index=35, number=35,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Dupl', index=36, number=36,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Egyd', index=37, number=37,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Egyh', index=38, number=38,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Egyp', index=39, number=39,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Elba', index=40, number=40,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Elym', index=41, number=41,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Ethi', index=42, number=42,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Geok', index=43, number=43,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Geor', index=44, number=44,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Glag', index=45, number=45,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Gong', index=46, number=46,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Gonm', index=47, number=47,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Goth', index=48, number=48,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Gran', index=49, number=49,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Grek', index=50, number=50,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Gujr', index=51, number=51,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Guru', index=52, number=52,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hanb', index=53, number=53,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hang', index=54, number=54,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hani', index=55, number=55,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hano', index=56, number=56,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hans', index=57, number=57,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hant', index=58, number=58,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hatr', index=59, number=59,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hebr', index=60, number=60,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hira', index=61, number=61,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hluw', index=62, number=62,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hmng', index=63, number=63,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hmnp', index=64, number=64,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hrkt', index=65, number=65,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hung', index=66, number=66,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Inds', index=67, number=67,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Ital', index=68, number=68,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Jamo', index=69, number=69,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Java', index=70, number=70,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Jpan', index=71, number=71,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Jurc', index=72, number=72,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Kali', index=73, number=73,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Kana', index=74, number=74,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Khar', index=75, number=75,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Khmr', index=76, number=76,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Khoj', index=77, number=77,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Kitl', index=78, number=78,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Kits', index=79, number=79,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Knda', index=80, number=80,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Kore', index=81, number=81,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Kpel', index=82, number=82,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Kthi', index=83, number=83,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Lana', index=84, number=84,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Laoo', index=85, number=85,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Latf', index=86, number=86,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Latg', index=87, number=87,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Latn', index=88, number=88,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Leke', index=89, number=89,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Lepc', index=90, number=90,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Limb', index=91, number=91,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Lina', index=92, number=92,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Linb', index=93, number=93,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Lisu', index=94, number=94,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Loma', index=95, number=95,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Lyci', index=96, number=96,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Lydi', index=97, number=97,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mahj', index=98, number=98,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Maka', index=99, number=99,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mand', index=100, number=100,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mani', index=101, number=101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Marc', index=102, number=102,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Maya', index=103, number=103,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Medf', index=104, number=104,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mend', index=105, number=105,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Merc', index=106, number=106,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mero', index=107, number=107,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mlym', index=108, number=108,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Modi', index=109, number=109,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mong', index=110, number=110,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Moon', index=111, number=111,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mroo', index=112, number=112,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mtei', index=113, number=113,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mult', index=114, number=114,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Mymr', index=115, number=115,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Nand', index=116, number=116,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Narb', index=117, number=117,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Nbat', index=118, number=118,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Newa', index=119, number=119,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Nkdb', index=120, number=120,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Nkgb', index=121, number=121,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Nkoo', index=122, number=122,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Nshu', index=123, number=123,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Ogam', index=124, number=124,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Olck', index=125, number=125,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Orkh', index=126, number=126,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Orya', index=127, number=127,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Osge', index=128, number=128,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Osma', index=129, number=129,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Palm', index=130, number=130,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Pauc', index=131, number=131,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Perm', index=132, number=132,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Phag', index=133, number=133,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Phli', index=134, number=134,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Phlp', index=135, number=135,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Phlv', index=136, number=136,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Phnx', index=137, number=137,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Plrd', index=138, number=138,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Piqd', index=139, number=139,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Prti', index=140, number=140,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Qaaa', index=141, number=141,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Qabx', index=142, number=142,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Rjng', index=143, number=143,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Rohg', index=144, number=144,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Roro', index=145, number=145,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Runr', index=146, number=146,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Samr', index=147, number=147,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sara', index=148, number=148,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sarb', index=149, number=149,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Saur', index=150, number=150,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sgnw', index=151, number=151,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Shaw', index=152, number=152,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Shrd', index=153, number=153,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Shui', index=154, number=154,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sidd', index=155, number=155,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sind', index=156, number=156,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sinh', index=157, number=157,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sogd', index=158, number=158,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sogo', index=159, number=159,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sora', index=160, number=160,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Soyo', index=161, number=161,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sund', index=162, number=162,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Sylo', index=163, number=163,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Syrc', index=164, number=164,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Syre', index=165, number=165,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Syrj', index=166, number=166,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Syrn', index=167, number=167,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Tagb', index=168, number=168,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Takr', index=169, number=169,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Tale', index=170, number=170,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Talu', index=171, number=171,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Taml', index=172, number=172,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Tang', index=173, number=173,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Tavt', index=174, number=174,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Telu', index=175, number=175,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Teng', index=176, number=176,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Tfng', index=177, number=177,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Tglg', index=178, number=178,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Thaa', index=179, number=179,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Thai', index=180, number=180,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Tibt', index=181, number=181,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Tirh', index=182, number=182,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Ugar', index=183, number=183,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Vaii', index=184, number=184,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Visp', index=185, number=185,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Wara', index=186, number=186,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Wcho', index=187, number=187,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Wole', index=188, number=188,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Xpeo', index=189, number=189,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Xsux', index=190, number=190,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Yiii', index=191, number=191,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Zanb', index=192, number=192,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Zinh', index=193, number=193,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Zmth', index=194, number=194,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Zsye', index=195, number=195,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Zsym', index=196, number=196,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Zxxx', index=197, number=197,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Zyyy', index=198, number=198,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Zzzz', index=199, number=199,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=3091,
serialized_end=5181,
)
_sym_db.RegisterEnumDescriptor(_LANGUAGE_SCRIPT)
# Machine-generated enum descriptor for `pb.Location.Country`: the
# ISO 3166-1 alpha-2 country codes plus a zero-valued UNKNOWN_COUNTRY.
# Every enum value's wire number equals its positional index, so the 250
# uniform EnumValueDescriptor entries are built from one ordered name list.
# NOTE(review): the serialized_start/end offsets must match the serialized
# file descriptor -- regenerate from the .proto rather than hand-editing.
_LOCATION_COUNTRY = _descriptor.EnumDescriptor(
  name='Country',
  full_name='pb.Location.Country',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
        name=_country_code, index=_country_idx, number=_country_idx,
        serialized_options=None, type=None)
    for _country_idx, _country_code in enumerate([
      'UNKNOWN_COUNTRY',
      'AF', 'AX', 'AL', 'DZ', 'AS', 'AD', 'AO', 'AI', 'AQ', 'AG',
      'AR', 'AM', 'AW', 'AU', 'AT', 'AZ', 'BS', 'BH', 'BD', 'BB',
      'BY', 'BE', 'BZ', 'BJ', 'BM', 'BT', 'BO', 'BQ', 'BA', 'BW',
      'BV', 'BR', 'IO', 'BN', 'BG', 'BF', 'BI', 'KH', 'CM', 'CA',
      'CV', 'KY', 'CF', 'TD', 'CL', 'CN', 'CX', 'CC', 'CO', 'KM',
      'CG', 'CD', 'CK', 'CR', 'CI', 'HR', 'CU', 'CW', 'CY', 'CZ',
      'DK', 'DJ', 'DM', 'DO', 'EC', 'EG', 'SV', 'GQ', 'ER', 'EE',
      'ET', 'FK', 'FO', 'FJ', 'FI', 'FR', 'GF', 'PF', 'TF', 'GA',
      'GM', 'GE', 'DE', 'GH', 'GI', 'GR', 'GL', 'GD', 'GP', 'GU',
      'GT', 'GG', 'GN', 'GW', 'GY', 'HT', 'HM', 'VA', 'HN', 'HK',
      'HU', 'IS', 'IN', 'ID', 'IR', 'IQ', 'IE', 'IM', 'IL', 'IT',
      'JM', 'JP', 'JE', 'JO', 'KZ', 'KE', 'KI', 'KP', 'KR', 'KW',
      'KG', 'LA', 'LV', 'LB', 'LS', 'LR', 'LY', 'LI', 'LT', 'LU',
      'MO', 'MK', 'MG', 'MW', 'MY', 'MV', 'ML', 'MT', 'MH', 'MQ',
      'MR', 'MU', 'YT', 'MX', 'FM', 'MD', 'MC', 'MN', 'ME', 'MS',
      'MA', 'MZ', 'MM', 'NA', 'NR', 'NP', 'NL', 'NC', 'NZ', 'NI',
      'NE', 'NG', 'NU', 'NF', 'MP', 'NO', 'OM', 'PK', 'PW', 'PS',
      'PA', 'PG', 'PY', 'PE', 'PH', 'PN', 'PL', 'PT', 'PR', 'QA',
      'RE', 'RO', 'RU', 'RW', 'BL', 'SH', 'KN', 'LC', 'MF', 'PM',
      'VC', 'WS', 'SM', 'ST', 'SA', 'SN', 'RS', 'SC', 'SL', 'SG',
      'SX', 'SK', 'SI', 'SB', 'SO', 'ZA', 'GS', 'SS', 'ES', 'LK',
      'SD', 'SR', 'SJ', 'SZ', 'SE', 'CH', 'SY', 'TW', 'TJ', 'TZ',
      'TH', 'TL', 'TG', 'TK', 'TO', 'TT', 'TN', 'TR', 'TM', 'TC',
      'TV', 'UG', 'UA', 'AE', 'GB', 'US', 'UM', 'UY', 'UZ', 'VU',
      'VE', 'VN', 'VG', 'VI', 'WF', 'EH', 'YE', 'ZM', 'ZW',
    ])
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=5316,
  serialized_end=7460,
)
_sym_db.RegisterEnumDescriptor(_LOCATION_COUNTRY)
# Machine-generated message descriptor for `pb.Claim` (protoc output).
# Field type codes follow descriptor.proto: type=11 is a message field,
# type=9 a string; cpp_type 10/9 are the matching C++ representations;
# label=1 is optional/singular, label=3 is repeated.
# NOTE(review): field numbers and serialized_start/end must stay in sync
# with the serialized file descriptor -- regenerate instead of hand-editing.
_CLAIM = _descriptor.Descriptor(
  name='Claim',
  full_name='pb.Claim',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # Message-typed sub-claim fields (presumably the members of the `type`
    # oneof declared below -- confirm against the .proto source).
    _descriptor.FieldDescriptor(
      name='stream', full_name='pb.Claim.stream', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='channel', full_name='pb.Claim.channel', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='collection', full_name='pb.Claim.collection', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='repost', full_name='pb.Claim.repost', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Scalar string metadata fields (empty-string defaults).
    _descriptor.FieldDescriptor(
      name='title', full_name='pb.Claim.title', index=4,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='description', full_name='pb.Claim.description', index=5,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='thumbnail', full_name='pb.Claim.thumbnail', index=6,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Repeated fields (label=3): tags are strings; languages/locations are
    # repeated sub-messages.
    _descriptor.FieldDescriptor(
      name='tags', full_name='pb.Claim.tags', index=7,
      number=11, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='languages', full_name='pb.Claim.languages', index=8,
      number=12, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='locations', full_name='pb.Claim.locations', index=9,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    # `type` oneof; its member fields list is populated later by the
    # generated wiring code (empty at construction time).
    _descriptor.OneofDescriptor(
      name='type', full_name='pb.Claim.type',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=20,
  serialized_end=319,
)
# Machine-generated message descriptor for `pb.Stream` (protoc output).
# Type codes per descriptor.proto: type=11 message, type=9 string,
# type=3 int64 (cpp_type=2); label=1 singular.
# NOTE(review): field numbers and serialized_start/end must stay in sync
# with the serialized file descriptor -- regenerate instead of hand-editing.
_STREAM = _descriptor.Descriptor(
  name='Stream',
  full_name='pb.Stream',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='source', full_name='pb.Stream.source', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # String metadata fields with empty-string defaults.
    _descriptor.FieldDescriptor(
      name='author', full_name='pb.Stream.author', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='license', full_name='pb.Stream.license', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='license_url', full_name='pb.Stream.license_url', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 field (type=3); presumably a Unix timestamp -- confirm in .proto.
    _descriptor.FieldDescriptor(
      name='release_time', full_name='pb.Stream.release_time', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='fee', full_name='pb.Stream.fee', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Media-variant sub-messages (presumably members of the `type` oneof
    # declared below -- confirm against the .proto source).
    _descriptor.FieldDescriptor(
      name='image', full_name='pb.Stream.image', index=6,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='video', full_name='pb.Stream.video', index=7,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='audio', full_name='pb.Stream.audio', index=8,
      number=12, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='software', full_name='pb.Stream.software', index=9,
      number=13, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    # `type` oneof; member fields are wired up later by generated code.
    _descriptor.OneofDescriptor(
      name='type', full_name='pb.Stream.type',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=322,
  serialized_end=582,
)
_CHANNEL = _descriptor.Descriptor(
name='Channel',
full_name='pb.Channel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='public_key', full_name='pb.Channel.public_key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='email', full_name='pb.Channel.email', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='website_url', full_name='pb.Channel.website_url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cover', full_name='pb.Channel.cover', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='featured', full_name='pb.Channel.featured', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=584,
serialized_end=709,
)
_CLAIMREFERENCE = _descriptor.Descriptor(
name='ClaimReference',
full_name='pb.ClaimReference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='claim_hash', full_name='pb.ClaimReference.claim_hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=711,
serialized_end=747,
)
_CLAIMLIST = _descriptor.Descriptor(
name='ClaimList',
full_name='pb.ClaimList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='list_type', full_name='pb.ClaimList.list_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='claim_references', full_name='pb.ClaimList.claim_references', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLAIMLIST_LISTTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=750,
serialized_end=894,
)
_SOURCE = _descriptor.Descriptor(
name='Source',
full_name='pb.Source',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hash', full_name='pb.Source.hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='pb.Source.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size', full_name='pb.Source.size', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='media_type', full_name='pb.Source.media_type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url', full_name='pb.Source.url', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sd_hash', full_name='pb.Source.sd_hash', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=896,
serialized_end=996,
)
_FEE = _descriptor.Descriptor(
name='Fee',
full_name='pb.Fee',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='currency', full_name='pb.Fee.currency', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='address', full_name='pb.Fee.address', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='amount', full_name='pb.Fee.amount', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_FEE_CURRENCY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=999,
serialized_end=1134,
)
_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='pb.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='width', full_name='pb.Image.width', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='pb.Image.height', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1136,
serialized_end=1174,
)
_VIDEO = _descriptor.Descriptor(
name='Video',
full_name='pb.Video',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='width', full_name='pb.Video.width', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='pb.Video.height', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='duration', full_name='pb.Video.duration', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='audio', full_name='pb.Video.audio', index=3,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1176,
serialized_end=1258,
)
_AUDIO = _descriptor.Descriptor(
name='Audio',
full_name='pb.Audio',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='duration', full_name='pb.Audio.duration', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1260,
serialized_end=1285,
)
_SOFTWARE = _descriptor.Descriptor(
name='Software',
full_name='pb.Software',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='os', full_name='pb.Software.os', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOFTWARE_OS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1287,
serialized_end=1395,
)
_LANGUAGE = _descriptor.Descriptor(
name='Language',
full_name='pb.Language',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='language', full_name='pb.Language.language', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='script', full_name='pb.Language.script', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='region', full_name='pb.Language.region', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LANGUAGE_LANGUAGE,
_LANGUAGE_SCRIPT,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1398,
serialized_end=5181,
)
_LOCATION = _descriptor.Descriptor(
name='Location',
full_name='pb.Location',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='country', full_name='pb.Location.country', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pb.Location.state', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='city', full_name='pb.Location.city', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='pb.Location.code', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='latitude', full_name='pb.Location.latitude', index=4,
number=5, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='longitude', full_name='pb.Location.longitude', index=5,
number=6, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_LOCATION_COUNTRY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5184,
serialized_end=7460,
)
_CLAIM.fields_by_name['stream'].message_type = _STREAM
_CLAIM.fields_by_name['channel'].message_type = _CHANNEL
_CLAIM.fields_by_name['collection'].message_type = _CLAIMLIST
_CLAIM.fields_by_name['repost'].message_type = _CLAIMREFERENCE
_CLAIM.fields_by_name['thumbnail'].message_type = _SOURCE
_CLAIM.fields_by_name['languages'].message_type = _LANGUAGE
_CLAIM.fields_by_name['locations'].message_type = _LOCATION
_CLAIM.oneofs_by_name['type'].fields.append(
_CLAIM.fields_by_name['stream'])
_CLAIM.fields_by_name['stream'].containing_oneof = _CLAIM.oneofs_by_name['type']
_CLAIM.oneofs_by_name['type'].fields.append(
_CLAIM.fields_by_name['channel'])
_CLAIM.fields_by_name['channel'].containing_oneof = _CLAIM.oneofs_by_name['type']
_CLAIM.oneofs_by_name['type'].fields.append(
_CLAIM.fields_by_name['collection'])
_CLAIM.fields_by_name['collection'].containing_oneof = _CLAIM.oneofs_by_name['type']
_CLAIM.oneofs_by_name['type'].fields.append(
_CLAIM.fields_by_name['repost'])
_CLAIM.fields_by_name['repost'].containing_oneof = _CLAIM.oneofs_by_name['type']
_STREAM.fields_by_name['source'].message_type = _SOURCE
_STREAM.fields_by_name['fee'].message_type = _FEE
_STREAM.fields_by_name['image'].message_type = _IMAGE
_STREAM.fields_by_name['video'].message_type = _VIDEO
_STREAM.fields_by_name['audio'].message_type = _AUDIO
_STREAM.fields_by_name['software'].message_type = _SOFTWARE
_STREAM.oneofs_by_name['type'].fields.append(
_STREAM.fields_by_name['image'])
_STREAM.fields_by_name['image'].containing_oneof = _STREAM.oneofs_by_name['type']
_STREAM.oneofs_by_name['type'].fields.append(
_STREAM.fields_by_name['video'])
_STREAM.fields_by_name['video'].containing_oneof = _STREAM.oneofs_by_name['type']
_STREAM.oneofs_by_name['type'].fields.append(
_STREAM.fields_by_name['audio'])
_STREAM.fields_by_name['audio'].containing_oneof = _STREAM.oneofs_by_name['type']
_STREAM.oneofs_by_name['type'].fields.append(
_STREAM.fields_by_name['software'])
_STREAM.fields_by_name['software'].containing_oneof = _STREAM.oneofs_by_name['type']
_CHANNEL.fields_by_name['cover'].message_type = _SOURCE
_CHANNEL.fields_by_name['featured'].message_type = _CLAIMLIST
_CLAIMLIST.fields_by_name['list_type'].enum_type = _CLAIMLIST_LISTTYPE
_CLAIMLIST.fields_by_name['claim_references'].message_type = _CLAIMREFERENCE
_CLAIMLIST_LISTTYPE.containing_type = _CLAIMLIST
_FEE.fields_by_name['currency'].enum_type = _FEE_CURRENCY
_FEE_CURRENCY.containing_type = _FEE
_VIDEO.fields_by_name['audio'].message_type = _AUDIO
_SOFTWARE_OS.containing_type = _SOFTWARE
_LANGUAGE.fields_by_name['language'].enum_type = _LANGUAGE_LANGUAGE
_LANGUAGE.fields_by_name['script'].enum_type = _LANGUAGE_SCRIPT
_LANGUAGE.fields_by_name['region'].enum_type = _LOCATION_COUNTRY
_LANGUAGE_LANGUAGE.containing_type = _LANGUAGE
_LANGUAGE_SCRIPT.containing_type = _LANGUAGE
_LOCATION.fields_by_name['country'].enum_type = _LOCATION_COUNTRY
_LOCATION_COUNTRY.containing_type = _LOCATION
DESCRIPTOR.message_types_by_name['Claim'] = _CLAIM
DESCRIPTOR.message_types_by_name['Stream'] = _STREAM
DESCRIPTOR.message_types_by_name['Channel'] = _CHANNEL
DESCRIPTOR.message_types_by_name['ClaimReference'] = _CLAIMREFERENCE
DESCRIPTOR.message_types_by_name['ClaimList'] = _CLAIMLIST
DESCRIPTOR.message_types_by_name['Source'] = _SOURCE
DESCRIPTOR.message_types_by_name['Fee'] = _FEE
DESCRIPTOR.message_types_by_name['Image'] = _IMAGE
DESCRIPTOR.message_types_by_name['Video'] = _VIDEO
DESCRIPTOR.message_types_by_name['Audio'] = _AUDIO
DESCRIPTOR.message_types_by_name['Software'] = _SOFTWARE
DESCRIPTOR.message_types_by_name['Language'] = _LANGUAGE
DESCRIPTOR.message_types_by_name['Location'] = _LOCATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Claim = _reflection.GeneratedProtocolMessageType('Claim', (_message.Message,), dict(
DESCRIPTOR = _CLAIM,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Claim)
))
_sym_db.RegisterMessage(Claim)
Stream = _reflection.GeneratedProtocolMessageType('Stream', (_message.Message,), dict(
DESCRIPTOR = _STREAM,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Stream)
))
_sym_db.RegisterMessage(Stream)
Channel = _reflection.GeneratedProtocolMessageType('Channel', (_message.Message,), dict(
DESCRIPTOR = _CHANNEL,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Channel)
))
_sym_db.RegisterMessage(Channel)
ClaimReference = _reflection.GeneratedProtocolMessageType('ClaimReference', (_message.Message,), dict(
DESCRIPTOR = _CLAIMREFERENCE,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.ClaimReference)
))
_sym_db.RegisterMessage(ClaimReference)
ClaimList = _reflection.GeneratedProtocolMessageType('ClaimList', (_message.Message,), dict(
DESCRIPTOR = _CLAIMLIST,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.ClaimList)
))
_sym_db.RegisterMessage(ClaimList)
Source = _reflection.GeneratedProtocolMessageType('Source', (_message.Message,), dict(
DESCRIPTOR = _SOURCE,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Source)
))
_sym_db.RegisterMessage(Source)
Fee = _reflection.GeneratedProtocolMessageType('Fee', (_message.Message,), dict(
DESCRIPTOR = _FEE,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Fee)
))
_sym_db.RegisterMessage(Fee)
Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), dict(
DESCRIPTOR = _IMAGE,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Image)
))
_sym_db.RegisterMessage(Image)
Video = _reflection.GeneratedProtocolMessageType('Video', (_message.Message,), dict(
DESCRIPTOR = _VIDEO,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Video)
))
_sym_db.RegisterMessage(Video)
Audio = _reflection.GeneratedProtocolMessageType('Audio', (_message.Message,), dict(
DESCRIPTOR = _AUDIO,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Audio)
))
_sym_db.RegisterMessage(Audio)
Software = _reflection.GeneratedProtocolMessageType('Software', (_message.Message,), dict(
DESCRIPTOR = _SOFTWARE,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Software)
))
_sym_db.RegisterMessage(Software)
Language = _reflection.GeneratedProtocolMessageType('Language', (_message.Message,), dict(
DESCRIPTOR = _LANGUAGE,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Language)
))
_sym_db.RegisterMessage(Language)
Location = _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), dict(
DESCRIPTOR = _LOCATION,
__module__ = 'claim_pb2'
# @@protoc_insertion_point(class_scope:pb.Location)
))
_sym_db.RegisterMessage(Location)
# @@protoc_insertion_point(module_scope)
| mit |
Azimut-Prod/azimut-deploy | server.py | 1 | 3849 | from fabric.api import *
from fabric.contrib.files import upload_template, append
import time
import config
@task
def uname():
    """Execute uname"""
    # Quick sanity/connectivity check: prints kernel and arch of the host.
    run("uname -a")
@task
def upgrade():
    """Bring the server fully up to date (update, upgrade, dist-upgrade)."""
    # DEBIAN_FRONTEND=noninteractive keeps dpkg from opening config prompts.
    for step in ("update", "upgrade", "dist-upgrade"):
        sudo("DEBIAN_FRONTEND=noninteractive apt-get %s -y" % step)
@task
def install_sudo():
    """Install the sudo program. Needs to be run as root (sudo absent)."""
    for command in ("apt-get update", "apt-get install -y sudo"):
        run(command)
@task
def reboot():
"""Reboot a machine"""
x = 5
while x > 0:
print "Rebooting", env.host, "in", x, "seconds..."
time.sleep(1)
x -= 1
sudo("reboot")
@task
def shutdown():
"""Shutdown a machine"""
x = 5
while x > 0:
print "Shutdowning", env.host, "in", x, "seconds..."
time.sleep(1)
x -= 1
sudo("halt")
@task
def copy_key_manager():
    """Copy the script for keymanagement [$AG:NeedKM]"""
    # Abort early when no key-manager host is configured for this env.
    if not hasattr(env, 'keymanagerName') or env.keymanagerName == '':
        print "No keymanager name !"
        return
    # Render files/updateKeys.sh with the env-specific values and push it
    # to /root on the target (use_sudo: /root is not writable otherwise).
    upload_template('files/updateKeys.sh', '/root/updateKeys.sh', {
        'server': env.keymanagerName,
        'users': env.keyManagerUsers,
        'gestion_adresse': config.GESTION_ADDRESS,
    }, use_sudo=True)
    sudo("chmod +x /root/updateKeys.sh")
@task
def cron_key_manager():
    """Install the crontab for the keymanagement"""
    # Dump the current crontab to a temp file, append our entry, then load
    # the file back.  `crontab -l` exits non-zero when the user has no
    # crontab yet, hence warn_only around that step.
    sudo('touch /tmp/crondump')
    with settings(warn_only=True):
        sudo('crontab -l > /tmp/crondump')
    # append() only adds the line when it is not already present, so the
    # task is safe to re-run.
    append('/tmp/crondump', '42 * * * * /root/updateKeys.sh', use_sudo=True)
    sudo('crontab /tmp/crondump')
@task
def setup_key_manager():
    """Setup the key manager [$AG:NeedKM]"""
    # ~/.ssh must exist before the update script can write authorized_keys.
    run('mkdir -p ~/.ssh/')
    # presumably needed so updateKeys.sh can fetch keys over TLS --
    # confirm against files/updateKeys.sh
    sudo('apt-get install -y ca-certificates')
    copy_key_manager()
    cron_key_manager()
    execute_key_manger()
@task
def execute_key_manger():
    """Execute the keyManager"""
    # NOTE(review): the task name has a typo ("manger"); kept as-is because
    # it is part of the public fab task interface (and called by setup_key_manager).
    sudo("/root/updateKeys.sh")
@task
def copy_config():
    """Push the shared shell/editor dotfiles to the remote user's home."""
    for pattern in ('/.vim*', '/.screenrc', '/.zsh*'):
        put(config.AZIMUT_CONFIG + pattern, '~')
@task
def copy_user_config():
    """Copy the config for a user [$AG:NeedUser]"""
    # No-op when no target user was configured for this environment.
    if not hasattr(env, 'fab_user') or env.fab_user == '':
        return
    home = '/home/' + env.fab_user + '/'
    for pattern in ('/.vim*', '/.screenrc', '/.zsh*'):
        put(config.AZIMUT_CONFIG + pattern, home)
    # Users get the user-flavoured zshrc installed as their ~/.zshrc.
    put(config.AZIMUT_CONFIG + '/.zshrc-user', home + '.zshrc')
@task
def install_base_progs():
    """Install base programms"""
    # Baseline toolset matching the dotfiles pushed by copy_config
    # (.zsh*, .screenrc, .vim*).
    sudo('apt-get install -y zsh screen vim')
@task
def switch_shell_to_zsh():
    """Change the shell to ZSH"""
    # Changes the login shell of the connecting user (run, not sudo).
    run('chsh -s /bin/zsh')
@task
def install_rsync():
    """Install rsync non-interactively.

    Without -y apt-get stops at a confirmation prompt and hangs unattended
    provisioning runs; every other install task in this file passes -y.
    """
    sudo("apt-get install -y rsync")
@task
def add_gestion_for_self_vms():
    """Add a host for gestion vm so they can access the server even if on the same server [$AG:NeedGestion]"""
    if not hasattr(env, 'gestion_ip') or env.gestion_ip == '':
        return
    # The >> redirection runs inside the remote sudo shell, so the append
    # lands in the remote /etc/hosts.  NOTE(review): re-running this task
    # appends duplicate lines -- confirm whether that matters here.
    sudo('echo "' + env.gestion_ip + ' ' + env.gestion_name + '" >> /etc/hosts')
@task
def setup():
    """Setup a new server [$AG:NeedKM][$AG:NeedGestion]"""
    # Provisioning pipeline; execute() runs each sub-task on every host.
    execute(install_sudo)
    execute(upgrade)
    execute(install_base_progs)
    execute(add_gestion_for_self_vms)
    execute(copy_config)
    execute(switch_shell_to_zsh)
    execute(install_rsync)
    # Ask interactively for the key-manager settings when they were not
    # provided by the environment definition.
    if not hasattr(env, 'keymanagerName') or env.keymanagerName == '':
        prompt("Key manager name ?", 'keymanagerName')
        prompt("Key manager users ?", 'keyManagerUsers', 'root')
    execute(setup_key_manager)
| mit |
lastweek/gem5 | configs/topologies/BaseTopology.py | 15 | 2949 | # Copyright (c) 2012 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Power
import m5
class BaseTopology(object):
    """Abstract base class for Ruby network topologies."""

    description = "BaseTopology"

    def __init__(self):
        """Subclasses override this to stash whatever objects from
        configs/ruby/<protocol>.py their makeTopology() will need --
        usually at least all of the controllers created there.
        """

    def makeTopology(self, options, IntLink, ExtLink, Router):
        """Build the topology; invoked from configs/ruby/Ruby.py.

        Must return (list(Router), list(IntLink), list(ExtLink)).
        Subclasses cannot change this signature: anything else a
        topology needs has to be passed to its constructor when it is
        instantiated in configs/ruby/<protocol>.py.
        """
        m5.util.fatal("BaseTopology should have been overridden!!")
class SimpleTopology(BaseTopology):
    """Base class for the pre-refactor "simple" Ruby topologies, which
    are built from nothing more than a flat list of controllers.
    """

    description = "SimpleTopology"

    def __init__(self, controllers):
        self.nodes = controllers

    def addController(self, controller):
        self.nodes.append(controller)

    def __len__(self):
        return len(self.nodes)
| bsd-3-clause |
user-none/calibre | src/calibre/devices/hanlin/driver.py | 19 | 4550 | # -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2009, Tijmen Ruizendaal <tijmen at mybebook.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Hanlin
'''
import re
from calibre.devices.usbms.driver import USBMS
class HANLINV3(USBMS):
    """Driver for Hanlin V3 eBook readers.

    The reader exposes internal memory and the SD card as two USB mass
    storage volumes; the order in which the host OS enumerates them
    varies per platform, so each platform hook below swaps main/card
    storage into the order calibre expects.
    """

    name = 'Hanlin V3 driver'
    gui_name = 'Hanlin V3'
    description = _('Communicate with Hanlin V3 eBook readers.')
    author = 'Tijmen Ruizendaal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'fb2', 'lit', 'prc', 'pdf', 'rtf', 'txt']

    VENDOR_ID = [0x0525]
    PRODUCT_ID = [0x8803, 0x6803]
    BCD = [0x312]

    VENDOR_NAME = 'LINUX'
    WINDOWS_MAIN_MEM = 'FILE-STOR_GADGET'
    WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET'

    OSX_MAIN_MEM = 'Linux File-Stor Gadget Media'
    OSX_CARD_A_MEM = 'Linux File-Stor Gadget Media'

    MAIN_MEMORY_VOLUME_LABEL = 'Hanlin V3 Internal Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Hanlin V3 Storage Card'

    SUPPORTS_SUB_DIRS = True

    @staticmethod
    def _disk_number(name):
        """Return the first run of digits in *name* as an int, or None
        when *name* is empty or contains no digits."""
        if not name:
            return None
        try:
            # Raw string: '\d' is an invalid escape in a plain literal
            # on modern Pythons.
            return int(re.findall(r'\d+', name)[0])
        except (IndexError, ValueError, TypeError):
            # No digits / unparsable name.  Narrowed from the original
            # bare except, which also swallowed KeyboardInterrupt.
            return None

    def osx_sort_names(self, names):
        """Restore main/card order on OS X using the trailing disk
        number of each BSD device name."""
        main = names.get('main', None)
        card = names.get('carda', None)

        main_num = self._disk_number(main)
        card_num = self._disk_number(card)

        if card_num is not None and main_num is not None and card_num > main_num:
            names['main'] = card
            names['carda'] = main

        # Only the card was detected: treat it as main memory.
        if card and not main:
            names['main'] = card
            names['carda'] = None
        return names

    def linux_swap_drives(self, drives):
        """Swap the first two drive nodes when both are present (the
        kernel enumerates them in the opposite of the expected order)."""
        if len(drives) < 2 or not drives[0] or not drives[1]:
            return drives
        drives = list(drives)
        drives[0], drives[1] = drives[1], drives[0]
        return tuple(drives)

    def windows_sort_drives(self, drives):
        """Swap main memory and card when Windows reports both."""
        if len(drives) < 2:
            return drives
        main = drives.get('main', None)
        carda = drives.get('carda', None)
        if main and carda:
            drives['main'] = carda
            drives['carda'] = main
        return drives
class SPECTRA(HANLINV3):
    """Driver for the Hanlin Spectra: same gadget stack as the V3, but a
    different USB product id and a wider list of supported formats."""

    name = 'Spectra'
    gui_name = 'Spectra'
    PRODUCT_ID = [0xa4a5]
    FORMATS = ['epub', 'mobi', 'fb2', 'lit', 'prc', 'chm', 'djvu', 'pdf', 'rtf', 'txt']
    SUPPORTS_SUB_DIRS = True
class HANLINV5(HANLINV3):
    """Driver for Hanlin V5 readers.  Inherits the drive-swapping logic
    from HANLINV3; only USB ids, volume labels and the OS X eject
    command differ."""

    name = 'Hanlin V5 driver'
    gui_name = 'Hanlin V5'
    description = _('Communicate with Hanlin V5 eBook readers.')

    VENDOR_ID = [0x0492]
    PRODUCT_ID = [0x8813]
    BCD = [0x319]

    OSX_MAIN_MEM = 'Hanlin V5 Internal Memory'
    OSX_CARD_MEM = 'Hanlin V5 Storage Card'

    MAIN_MEMORY_VOLUME_LABEL = 'Hanlin V5 Internal Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Hanlin V5 Storage Card'

    OSX_EJECT_COMMAND = ['diskutil', 'unmount', 'force']
class BOOX(HANLINV3):
    """Driver for Onyx BOOX readers.  Reuses the HANLINV3 USB plumbing
    but disables its drive swapping and lets the user pick the upload
    directories via the extra-customization setting."""

    name = 'BOOX driver'
    gui_name = 'BOOX'
    description = _('Communicate with the BOOX eBook reader.')
    author = 'Jesus Manuel Marinho Valcarce'
    supported_platforms = ['windows', 'osx', 'linux']
    METADATA_CACHE = '.metadata.calibre'
    DRIVEINFO = '.driveinfo.calibre'
    icon = I('devices/boox.jpg')

    # Ordered list of supported formats
    FORMATS = ['epub', 'fb2', 'djvu', 'pdf', 'html', 'txt', 'rtf', 'mobi',
               'prc', 'chm', 'doc']

    VENDOR_ID = [0x0525]
    PRODUCT_ID = [0xa4a5]
    BCD = [0x322, 0x323, 0x326]

    MAIN_MEMORY_VOLUME_LABEL = 'BOOX Internal Memory'
    STORAGE_CARD_VOLUME_LABEL = 'BOOX Storage Card'

    EBOOK_DIR_MAIN = ['MyBooks']
    EXTRA_CUSTOMIZATION_MESSAGE = _('Comma separated list of directories to '
            'send e-books to on the device. The first one that exists will '
            'be used.')
    EXTRA_CUSTOMIZATION_DEFAULT = ', '.join(EBOOK_DIR_MAIN)

    # EBOOK_DIR_CARD_A = 'MyBooks' ## Am quite sure we need this.

    def post_open_callback(self):
        # Apply the user's custom upload directories (comma-separated in
        # the extra-customization setting); fall back to the class default.
        opts = self.settings()
        dirs = opts.extra_customization
        if not dirs:
            dirs = self.EBOOK_DIR_MAIN
        else:
            dirs = [x.strip() for x in dirs.split(',')]
        self.EBOOK_DIR_MAIN = dirs

    # The BOOX enumerates its drives in the expected order, so the
    # HANLINV3 swapping hooks are overridden to be no-ops.
    def windows_sort_drives(self, drives):
        return drives

    def osx_sort_names(self, names):
        return names

    def linux_swap_drives(self, drives):
        return drives
| gpl-3.0 |
jeremydane/Info3180-Project4 | server/lib/flask/__init__.py | 4 | 1722 | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.10'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .module import Module
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| apache-2.0 |
devs4v/devs4v-information-retrieval15 | project/venv/lib/python2.7/site-packages/django/utils/html_parser.py | 82 | 5221 | import re
import sys
from django.utils.six.moves import html_parser as _html_parser
current_version = sys.version_info
# http://bugs.python.org/issue670664 was fixed upstream in Python 2.7.3 and
# 3.2.3; only interpreters older than those need the patched parser below.
use_workaround = (
    current_version < (2, 7, 3)
    or (3, 0) <= current_version < (3, 2, 3)
)
# Python 3.5+ removed HTMLParseError from the stdlib parser module; keep the
# name importable by aliasing it when present and substituting a dummy
# exception class otherwise.
HTMLParseError = getattr(_html_parser, 'HTMLParseError', None)
if HTMLParseError is None:
    class HTMLParseError(Exception):
        pass
if not use_workaround:
    # Stdlib parser is already fixed on this interpreter.
    if current_version >= (3, 4):

        class HTMLParser(_html_parser.HTMLParser):
            """Explicitly set convert_charrefs to be False.

            This silences a deprecation warning on Python 3.4, but we can't do
            it at call time because Python 2.7 does not have the keyword
            argument.
            """

            def __init__(self, convert_charrefs=False, **kwargs):
                _html_parser.HTMLParser.__init__(self, convert_charrefs=convert_charrefs, **kwargs)

    else:
        # No keyword to suppress, so the stdlib class can be used directly.
        HTMLParser = _html_parser.HTMLParser
else:
    # Matches a tag name followed by any whitespace / stray '/' characters
    # that precede the attribute list.
    tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')

    class HTMLParser(_html_parser.HTMLParser):
        """
        Patched version of stdlib's HTMLParser with patch from:
        http://bugs.python.org/issue670664
        """

        def __init__(self):
            _html_parser.HTMLParser.__init__(self)
            # Name of the CDATA element currently being parsed (e.g.
            # 'script' / 'style'), or None in normal parsing mode.
            self.cdata_tag = None

        def set_cdata_mode(self, tag):
            # Switch the tokenizer into CDATA mode for `tag`; fall back to a
            # hand-built end-tag regex when the module-private pattern is
            # missing.
            try:
                self.interesting = _html_parser.interesting_cdata
            except AttributeError:
                self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
            self.cdata_tag = tag.lower()

        def clear_cdata_mode(self):
            self.interesting = _html_parser.interesting_normal
            self.cdata_tag = None

        # Internal -- handle starttag, return end or -1 if not terminated
        def parse_starttag(self, i):
            self.__starttag_text = None
            endpos = self.check_for_whole_start_tag(i)
            if endpos < 0:
                return endpos
            rawdata = self.rawdata
            self.__starttag_text = rawdata[i:endpos]
            # Now parse the data between i+1 and j into a tag and attrs
            attrs = []
            match = tagfind.match(rawdata, i + 1)
            assert match, 'unexpected call to parse_starttag()'
            k = match.end()
            self.lasttag = tag = match.group(1).lower()
            while k < endpos:
                m = _html_parser.attrfind.match(rawdata, k)
                if not m:
                    break
                attrname, rest, attrvalue = m.group(1, 2, 3)
                if not rest:
                    attrvalue = None
                elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
                        attrvalue[:1] == '"' == attrvalue[-1:]):
                    attrvalue = attrvalue[1:-1]
                if attrvalue:
                    attrvalue = self.unescape(attrvalue)
                attrs.append((attrname.lower(), attrvalue))
                k = m.end()
            end = rawdata[k:endpos].strip()
            if end not in (">", "/>"):
                lineno, offset = self.getpos()
                if "\n" in self.__starttag_text:
                    lineno = lineno + self.__starttag_text.count("\n")
                    offset = (len(self.__starttag_text)
                              - self.__starttag_text.rfind("\n"))
                else:
                    offset = offset + len(self.__starttag_text)
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            if end.endswith('/>'):
                # XHTML-style empty tag: <span attr="value" />
                self.handle_startendtag(tag, attrs)
            else:
                self.handle_starttag(tag, attrs)
                if tag in self.CDATA_CONTENT_ELEMENTS:
                    self.set_cdata_mode(tag)  # <--------------------------- Changed
            return endpos

        # Internal -- parse endtag, return end or -1 if incomplete
        def parse_endtag(self, i):
            rawdata = self.rawdata
            assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
            match = _html_parser.endendtag.search(rawdata, i + 1)  # >
            if not match:
                return -1
            j = match.end()
            match = _html_parser.endtagfind.match(rawdata, i)  # </ + tag + >
            if not match:
                if self.cdata_tag is not None:  # *** add ***
                    self.handle_data(rawdata[i:j])  # *** add ***
                    return j  # *** add ***
                self.error("bad end tag: %r" % (rawdata[i:j],))
            # --- changed start ---------------------------------------------------
            tag = match.group(1).strip()
            if self.cdata_tag is not None:
                if tag.lower() != self.cdata_tag:
                    self.handle_data(rawdata[i:j])
                    return j
            # --- changed end -----------------------------------------------------
            self.handle_endtag(tag.lower())
            self.clear_cdata_mode()
            return j
| mit |
scutwukai/smartpool | poolmysql.py | 1 | 6389 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import MySQLdb
from datetime import datetime, timedelta
from contextlib import contextmanager, closing
from base.smartpool import Connection
query_logger = None
__all__ = [
"transaction",
"lock_str",
"MySQLdbConnection",
]
#################
def total_seconds(td):
    """Whole seconds in a timedelta (pre-2.7 shim; microseconds ignored)."""
    return td.seconds + td.days * 86400
def log(ident, msg, logger):
    """Emit "<ident> - <msg>" through *logger*; no-op when *logger* is None."""
    if logger is not None:
        logger("%d - %s" % (ident, msg))
def qlog(conn, msg):
    """Log *msg* for *conn* (identified by id()) via the module query logger."""
    log(id(conn), msg, query_logger)
def active_time(old_handler):
    """Decorator: stamp ``self._last_active_time`` before and after the call."""
    def new_handler(self, *args, **kwargs):
        self._last_active_time = datetime.now()
        result = old_handler(self, *args, **kwargs)
        self._last_active_time = datetime.now()
        return result
    return new_handler
def ready(old_handler):
    """Decorator: lazily (re)connect a reusable connection before the call."""
    def new_handler(self, *args, **kwargs):
        needs_connect = self._conn is None and self.reusable
        if needs_connect:
            self.connect()
        return old_handler(self, *args, **kwargs)
    return new_handler
#######################
class Job(object):
    """
    Marker object recording whether a unit of work has been completed.
    """

    def __init__(self):
        self._done = False

    def is_finished(self):
        """Return True once finish() has been called."""
        return self._done

    def finish(self):
        """Mark the job as completed."""
        self._done = True
@contextmanager
def transaction(conn):
    """
    Run a block inside a database transaction.

    The block is COMMITted only if the caller explicitly marks the yielded
    job as finished via ``trans.finish()``; in every other case (including
    when an exception escapes the block) the transaction is ROLLED BACK.

    >>> with transaction(conn) as trans:
    >>>     ...do work...
    >>>     trans.finish()   # omit this call to roll back instead

    @param conn: database connection
    """
    job = Job()
    conn.begin()
    try:
        yield job
    except:
        # Propagate after undoing any partial work.
        conn.rollback()
        raise
    if job.is_finished():
        conn.commit()
    else:
        conn.rollback()
@contextmanager
def lock_str(conn, s, timeout=0):
    """
    Hold a database string lock for the duration of a block.

    Yields ``True`` if the lock on *s* was obtained within *timeout*
    seconds and ``False`` otherwise; when held, the lock is always
    released on exit from the block.

    >>> with lock_str(conn, s, timeout) as locked:
    >>>     if not locked:
    >>>         ...  # could not acquire 's'
    >>>     ...do work...

    @param conn: database connection
    @param s: the string wanted to lock
    @param timeout: how many seconds to wait for getting the lock
    """
    got_it = False
    try:
        got_it = conn.lock(s, timeout)
        yield got_it
    finally:
        if got_it:
            conn.release(s)
###########################################
class MySQLdbConnection(Connection):
    """
    Pooled MySQL connection wrapper built on MySQLdb.

    Tracks transaction state and held named locks so the pool can decide
    whether this connection may be handed out again (``reusable``).
    """

    def __init__(self, **db_config):
        super(MySQLdbConnection, self).__init__(**db_config)
        # private
        self._conn = None          # underlying MySQLdb connection (lazy)
        self._locks = []           # names of MySQL GET_LOCK() locks held
        self._in_trans = False     # True between begin() and commit()/rollback()
        self._last_active_time = None  # stamped by @active_time on each DB op

    def __deepcopy__(self, memo):
        # Connections must never be duplicated: deep copies share the object.
        return self

    @property
    def in_trans(self):
        return self._in_trans

    @property
    def has_lock(self):
        return len(self._locks) > 0

    ################ pool interface #################

    @property
    def reusable(self):
        # A connection inside a transaction or holding locks must stay with
        # its current user.
        return not (self.in_trans or self.has_lock)

    @property
    def idle(self):
        # Seconds since the last database operation (0 if never used).
        if self._last_active_time is None:
            return 0
        nowtime = datetime.now()
        return total_seconds(nowtime - self._last_active_time)

    def ping(self):
        """Return True if the underlying connection is open and alive."""
        qlog(self, "ping")
        if self._conn is None:
            return False
        try:
            self._conn.ping()
            return True
        except MySQLdb.DatabaseError:
            return False

    @active_time
    def connect(self):
        qlog(self, "connect")
        self._conn = MySQLdb.connect(**self._db_config)
        # Autocommit by default; explicit transactions go through begin().
        self._conn.autocommit(True)

    def close(self):
        qlog(self, "close")
        if self._conn is None:
            return
        try:
            self._conn.close()
        finally:
            # Drop the reference even if close() raised.
            self._conn = None

    def make_reusable(self):
        """Force the connection back to a clean, pool-reusable state."""
        if self.in_trans:
            self.rollback()
        if self.has_lock:
            # BUG FIX: release() removes entries from self._locks, and
            # iterating a list while it is being mutated skips elements
            # (every other lock was left held). Iterate over a snapshot.
            for key in list(self._locks):
                self.release(key)

    ############## base dbop #############

    @ready
    @active_time
    def select(self, sql, params=None, dict_cursor=False):
        """Run a SELECT and return all rows (as dicts if dict_cursor)."""
        qlog(self, "execute: %s - %s" % (sql, repr(params)))
        cursor = None
        if dict_cursor:
            cursor = self._conn.cursor(MySQLdb.cursors.DictCursor)
        else:
            cursor = self._conn.cursor()
        with closing(cursor) as cur:
            if params is None:
                cur.execute(sql)
            else:
                cur.execute(sql, params)
            return cur.fetchall()

    @ready
    @active_time
    def insert(self, sql, params=None):
        """Run an INSERT and return the last auto-increment row id."""
        qlog(self, "execute: %s - %s" % (sql, repr(params)))
        cursor = self._conn.cursor()
        with closing(cursor) as cur:
            if params is None:
                cur.execute(sql)
            else:
                cur.execute(sql, params)
            return cur.lastrowid

    @ready
    @active_time
    def execute(self, sql, params=None):
        """Run an arbitrary statement and return the affected row count."""
        qlog(self, "execute: %s - %s" % (sql, repr(params)))
        cursor = self._conn.cursor()
        with closing(cursor) as cur:
            if params is None:
                return cur.execute(sql)
            else:
                return cur.execute(sql, params)

    ############### unreusable #############

    def begin(self):
        if self._in_trans:
            raise Exception("nested trans is not allowed")
        self.execute("begin")
        self._in_trans = True

    def rollback(self):
        self.execute("rollback")
        self._in_trans = False

    def commit(self):
        self.execute("commit")
        self._in_trans = False

    def lock(self, key, timeout=0):
        # MySQL GET_LOCK returns 1 on success, 0 on timeout, NULL on error.
        locked = self.select("SELECT GET_LOCK(%s, %s)", (key, timeout))[0][0] == 1
        if locked:
            self._locks.append(key)
        return locked

    def release(self, key):
        released = self.select("SELECT RELEASE_LOCK(%s)", (key, ))[0][0] == 1
        if released and key in self._locks:
            self._locks.remove(key)
        return released
| mit |
MeGotsThis/BotGotsThis | tests/cache/test_ffz_api.py | 1 | 5157 | from asynctest import patch
import bot # noqa: F401
from .base_cache_store import TestCacheStore
class TestCacheFfzApiGlobalEmotes(TestCacheStore):
    """Tests for the FrankerFaceZ global-emote cache wrapper."""

    async def setUp(self):
        await super().setUp()
        # Stub out the FFZ HTTP API so no network calls are made.
        patcher = patch('lib.api.ffz.getGlobalEmotes')
        self.addCleanup(patcher.stop)
        self.mock_emotes = patcher.start()
        self.mock_emotes.return_value = {3: 'BeanieHipster'}

    async def test_load(self):
        # First load hits the API; the second is served from the cache.
        self.assertIs(await self.data.ffz_load_global_emotes(), True)
        self.assertTrue(self.mock_emotes.called)
        self.mock_emotes.reset_mock()
        self.assertIs(await self.data.ffz_load_global_emotes(), True)
        self.assertFalse(self.mock_emotes.called)
        self.assertIsNotNone(
            await self.redis.get(self.data._ffzGlobalEmoteKey()))

    async def test_load_background(self):
        # A background load refreshes even when a (nearly expired) cached
        # value exists.
        self.assertIs(await self.data.ffz_load_global_emotes(background=True),
                      True)
        self.assertTrue(self.mock_emotes.called)
        # NOTE(review): this expire() call is not awaited, unlike the other
        # redis operations in this class -- confirm it is a sync wrapper.
        self.data.redis.expire(self.data._ffzGlobalEmoteKey(), 5)
        self.mock_emotes.reset_mock()
        self.assertIs(await self.data.ffz_load_global_emotes(background=True),
                      True)
        self.assertTrue(self.mock_emotes.called)
        self.assertIsNotNone(
            await self.redis.get(self.data._ffzGlobalEmoteKey()))

    async def test_load_none(self):
        # An API failure (None) must not be cached.
        self.mock_emotes.return_value = None
        self.assertIs(await self.data.ffz_load_global_emotes(), False)
        self.assertTrue(self.mock_emotes.called)
        self.mock_emotes.reset_mock()
        self.assertIs(await self.data.ffz_load_global_emotes(), False)
        self.assertTrue(self.mock_emotes.called)
        self.assertIsNone(await self.redis.get(self.data._ffzGlobalEmoteKey()))

    async def test_save(self):
        self.assertIs(
            await self.data.ffz_save_global_emotes({3: 'BeanieHipster'}), True)
        self.assertIsNotNone(
            await self.redis.get(self.data._ffzGlobalEmoteKey()))

    async def test_get(self):
        await self.data.ffz_save_global_emotes({3: 'BeanieHipster'})
        self.assertEqual(await self.data.ffz_get_global_emotes(),
                         {3: 'BeanieHipster'})

    async def test_get_empty(self):
        self.assertIsNone(await self.data.ffz_get_global_emotes())
class TestCacheFfzApiBroadcasterEmotes(TestCacheStore):
    """Tests for the FrankerFaceZ per-broadcaster emote cache wrapper."""

    async def setUp(self):
        await super().setUp()
        # Stub out the FFZ HTTP API so no network calls are made.
        patcher = patch('lib.api.ffz.getBroadcasterEmotes')
        self.addCleanup(patcher.stop)
        self.mock_emotes = patcher.start()
        self.mock_emotes.return_value = {18146: 'KevinSquirtle'}

    async def test_load(self):
        # First load hits the API; the second is served from the cache.
        self.assertIs(
            await self.data.ffz_load_broadcaster_emotes('botgotsthis'), True)
        self.assertTrue(self.mock_emotes.called)
        self.mock_emotes.reset_mock()
        self.assertIs(
            await self.data.ffz_load_broadcaster_emotes('botgotsthis'), True)
        self.assertFalse(self.mock_emotes.called)
        self.assertIsNotNone(
            await self.redis.get(
                self.data._ffzBroadcasterEmoteKey('botgotsthis')))

    async def test_load_background(self):
        self.assertIs(
            await self.data.ffz_load_broadcaster_emotes(
                'botgotsthis', background=True),
            True)
        self.assertTrue(self.mock_emotes.called)
        # NOTE(review): expire() is not awaited here, unlike the other redis
        # calls in this class -- confirm it is a sync wrapper.
        self.data.redis.expire(
            self.data._ffzBroadcasterEmoteKey('botgotsthis'), 5)
        self.mock_emotes.reset_mock()
        self.assertIs(
            await self.data.ffz_load_broadcaster_emotes(
                'botgotsthis', background=True),
            True)
        self.assertTrue(self.mock_emotes.called)
        self.assertIsNotNone(
            await self.redis.get(
                self.data._ffzBroadcasterEmoteKey('botgotsthis')))

    async def test_load_none(self):
        # An API failure (None) must not be cached.
        self.mock_emotes.return_value = None
        self.assertIs(
            await self.data.ffz_load_broadcaster_emotes('botgotsthis'), False)
        self.assertTrue(self.mock_emotes.called)
        self.mock_emotes.reset_mock()
        self.assertIs(
            await self.data.ffz_load_broadcaster_emotes('botgotsthis'), False)
        self.assertTrue(self.mock_emotes.called)
        self.assertIsNone(
            await self.redis.get(
                self.data._ffzBroadcasterEmoteKey('botgotsthis')))

    async def test_save(self):
        self.assertIs(
            await self.data.ffz_save_broadcaster_emotes(
                'botgotsthis', {18146: 'KevinSquirtle'}),
            True)
        self.assertIsNotNone(
            await self.redis.get(
                self.data._ffzBroadcasterEmoteKey('botgotsthis')))

    async def test_get(self):
        await self.data.ffz_save_broadcaster_emotes(
            'botgotsthis', {18146: 'KevinSquirtle'})
        self.assertEqual(
            await self.data.ffz_get_broadcaster_emotes('botgotsthis'),
            {18146: 'KevinSquirtle'})

    async def test_get_empty(self):
        self.assertIsNone(
            await self.data.ffz_get_broadcaster_emotes('botgotsthis'))
| gpl-3.0 |
menpo/menpo | menpo/visualize/base.py | 2 | 57597 | try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numpy as np
from menpo.base import MenpoMissingDependencyError
class Menpo3dMissingError(MenpoMissingDependencyError):
    r"""
    Raised when a 3D visualisation method is imported but the optional
    'menpo3d' package is not installed.
    """

    def __init__(self, actual_missing_import_name):
        super(Menpo3dMissingError, self).__init__(actual_missing_import_name)
        extra = "\nThis import is required in order to use the 'menpo3d' package"
        self.message += extra
class Renderer(object):
    r"""
    Abstract base for framework-specific rendering backends.

    Renderers are assumed to be stateful: a figure is addressed either by an
    explicit ``figure_id`` or by asking the engine for a brand new figure.
    Supplying a ``figure_id`` together with ``new_figure=True`` is
    contradictory and rejected; supplying neither means the engine's current
    default state is used.

    Parameters
    ----------
    figure_id : `object`
        Any object identifying a figure in the target framework
        (`str`, `int`, `float`, etc.), or ``None``.
    new_figure : `bool`
        If ``True``, the engine should create a fresh figure.

    Raises
    ------
    ValueError
        If a ``figure_id`` is given while ``new_figure`` is ``True``.
    """

    def __init__(self, figure_id, new_figure):
        if new_figure and figure_id is not None:
            raise ValueError(
                "Conflicting arguments. figure_id cannot be "
                "specified if the new_figure flag is True"
            )
        self.figure_id = figure_id
        self.new_figure = new_figure
        self.figure = self.get_figure()

    def render(self, **kwargs):
        r"""
        Hook implementing the actual drawing for a concrete renderer.

        Parameters
        ----------
        kwargs : `dict`
            Forwarded to the specific rendering engine.

        Returns
        -------
        viewer : :map:`Renderer`
            Pointer to `self`.
        """
        pass

    def get_figure(self):
        r"""
        Hook returning the figure to render on; implementations should also
        set ``figure_id`` appropriately.

        Returns
        -------
        figure : `object`
            The figure object that the renderer will render on.
        """
        pass

    def save_figure(self, **kwargs):
        r"""
        Hook persisting the figure of the current ``figure_id`` to file.

        Parameters
        ----------
        kwargs : `dict`
            Options to apply when saving the figure to file.
        """
        pass

    def clear_figure(self):
        r"""
        Hook wiping the current figure clean.
        """
        pass

    def force_draw(self):
        r"""
        Hook forcing an immediate redraw of the current figure.
        """
        pass
class viewwrapper(object):
    r"""
    Descriptor that dispatches a ``view``-style method to the correct
    dimensionality-specific implementation at attribute-access time.

    Rather than maintaining parallel 2D and 3D classes purely for viewing,
    a single class defines ``_<name>_2d`` and ``_<name>_3d`` methods and
    decorates a placeholder ``<name>`` with this wrapper; the descriptor
    inspects ``instance.n_dims`` and returns the matching bound method.
    Any other dimensionality yields a callable that raises ``ValueError``.
    """

    def __init__(self, wrapped_func):
        base_name = wrapped_func.__name__
        self._2d_fname = "_{}_2d".format(base_name)
        self._3d_fname = "_{}_3d".format(base_name)

    def __get__(self, instance, instancetype):
        n_dims = instance.n_dims
        if n_dims == 2:
            return getattr(instance, self._2d_fname)
        if n_dims == 3:
            return getattr(instance, self._3d_fname)

        def raise_not_supported(*args, **kwargs):
            r"""
            Viewing of objects with greater than 3 dimensions is not
            currently possible.
            """
            raise ValueError(
                "Viewing of objects with greater than 3 "
                "dimensions is not currently possible."
            )

        return raise_not_supported
class Viewable(object):
    r"""
    Mixin for objects that can visualize themselves. The host class must
    expose ``n_dims``; :map:`viewwrapper` uses it to route ``view`` to the
    2D or 3D implementation.
    """

    @viewwrapper
    def view(self):
        r"""
        Dispatching view method -- see :map:`viewwrapper` for how the
        dimensionality-specific implementation is selected.
        """
        pass

    def _view_2d(self, **kwargs):
        raise NotImplementedError("2D Viewing is not supported.")

    def _view_3d(self, **kwargs):
        raise NotImplementedError("3D Viewing is not supported.")
class LandmarkableViewable(object):
    r"""
    Mixin for objects that are both :map:`Landmarkable` and :map:`Viewable`,
    adding a dispatched helper for viewing the object together with its
    landmarks on a single figure.
    """

    @viewwrapper
    def view_landmarks(self, **kwargs):
        pass

    def _view_landmarks_2d(self, **kwargs):
        raise NotImplementedError("2D Landmark Viewing is not supported.")

    def _view_landmarks_3d(self, **kwargs):
        raise NotImplementedError("3D Landmark Viewing is not supported.")
from menpo.visualize.viewmatplotlib import (
MatplotlibImageViewer2d,
MatplotlibImageSubplotsViewer2d,
MatplotlibLandmarkViewer2d,
MatplotlibAlignmentViewer2d,
MatplotlibGraphPlotter,
MatplotlibMultiImageViewer2d,
MatplotlibMultiImageSubplotsViewer2d,
MatplotlibPointGraphViewer2d,
)
# Default importer types
PointGraphViewer2d = MatplotlibPointGraphViewer2d
LandmarkViewer2d = MatplotlibLandmarkViewer2d
ImageViewer2d = MatplotlibImageViewer2d
ImageSubplotsViewer2d = MatplotlibImageSubplotsViewer2d
AlignmentViewer2d = MatplotlibAlignmentViewer2d
GraphPlotter = MatplotlibGraphPlotter
MultiImageViewer2d = MatplotlibMultiImageViewer2d
MultiImageSubplotsViewer2d = MatplotlibMultiImageSubplotsViewer2d
class ImageViewer(object):
    r"""
    Base :map:`Image` viewer that abstracts away dimensionality. It can
    visualize multiple channels of an image in subplots.

    Parameters
    ----------
    figure_id : `object`
        A figure id. Could be any valid object that identifies a figure in a
        given framework (`str`, `int`, `float`, etc.).
    new_figure : `bool`
        Whether the rendering engine should create a new figure.
    dimensions : {``2``, ``3``} `int`
        The number of dimensions in the image.
    pixels : ``(N, D)`` `ndarray`
        The pixels to render.
    channels: `int` or `list` or ``'all'`` or `None`
        A specific selection of channels to render. The user can choose either
        a single or multiple channels. If ``'all'``, render all channels in
        subplot mode. If `None` and image is not greyscale or RGB, render all
        channels in subplots. If `None` and image is greyscale or RGB, then do
        not plot channels in different subplots.
    mask: ``(N, D)`` `ndarray`
        A `bool` mask to be applied to the image. All points outside the
        mask are set to ``0``.
    """

    def __init__(
        self, figure_id, new_figure, dimensions, pixels, channels=None, mask=None
    ):
        if len(pixels.shape) == 3 and pixels.shape[0] == 3:
            # then probably an RGB image, so ensure the clipped pixels.
            from menpo.image import Image

            image = Image(pixels, copy=False)
            image_clipped = image.clip_pixels()
            pixels = image_clipped.pixels
        else:
            # Copy so the masking in _masked_pixels never mutates the
            # caller's array.
            pixels = pixels.copy()
        self.figure_id = figure_id
        self.new_figure = new_figure
        self.dimensions = dimensions
        pixels, self.use_subplots = self._parse_channels(channels, pixels)
        self.pixels = self._masked_pixels(pixels, mask)
        self._flip_image_channels()

    def _flip_image_channels(self):
        # Move the channel axis to the back for rendering (only relevant for
        # multi-channel, channels-first pixel arrays).
        if self.pixels.ndim == 3:
            from menpo.image.base import channels_to_back

            self.pixels = channels_to_back(self.pixels)

    def _parse_channels(self, channels, pixels):
        r"""
        Parse `channels` parameter. If `channels` is `int` or `list`, keep it as
        is. If `channels` is ``'all'``, return a `list` of all the image's
        channels. If `channels` is `None`, return the minimum between an
        `upper_limit` and the image's number of channels. If image is greyscale
        or RGB and `channels` is `None`, then do not plot channels in different
        subplots.

        Parameters
        ----------
        channels : `int` or `list` or ``'all'`` or `None`
            A specific selection of channels to render.
        pixels : ``(N, D)`` `ndarray`
            The image's pixels to render.

        Returns
        -------
        pixels : ``(N, D)`` `ndarray`
            The pixels to be visualized.
        use_subplots : `bool`
            Whether to visualize using subplots.
        """
        # Flag to trigger ImageSubplotsViewer2d or ImageViewer2d
        use_subplots = True
        n_channels = pixels.shape[0]
        if channels is None:
            if n_channels == 1:
                # Greyscale: drop the channel axis, single plot.
                pixels = pixels[0, ...]
                use_subplots = False
            elif n_channels == 3:
                # RGB: keep all channels together, single plot.
                use_subplots = False
        elif channels != "all":
            if isinstance(channels, Iterable):
                if len(channels) == 1:
                    pixels = pixels[channels[0], ...]
                    use_subplots = False
                else:
                    pixels = pixels[channels, ...]
            else:
                # A single int channel selection: one plot.
                pixels = pixels[channels, ...]
                use_subplots = False
        return pixels, use_subplots

    def _masked_pixels(self, pixels, mask):
        r"""
        Return the masked pixels using a given `bool` mask. In order to make
        sure that the non-masked pixels are visualized in white, their value
        is set to the maximum of pixels.

        Parameters
        ----------
        pixels : ``(N, D)`` `ndarray`
            The image's pixels to render.
        mask: ``(N, D)`` `ndarray`
            A `bool` mask to be applied to the image. All points outside the
            mask are set to the image max. If mask is `None`, then the initial
            pixels are returned.

        Returns
        -------
        masked_pixels : ``(N, D)`` `ndarray`
            The masked pixels.
        """
        if mask is not None:
            nanmax = np.nanmax(pixels)
            # Paint everything outside the mask slightly above the max value
            # so that it renders as white.
            pixels[..., ~mask] = nanmax + (0.01 * nanmax)
        return pixels

    def render(self, **kwargs):
        r"""
        Select the correct type of image viewer for the given image
        dimensionality.

        Parameters
        ----------
        kwargs : `dict`
            Passed through to image viewer.

        Returns
        -------
        viewer : :map:`Renderer`
            The rendering object.

        Raises
        ------
        ValueError
            Only 2D images are supported.
        """
        if self.dimensions == 2:
            if self.use_subplots:
                return ImageSubplotsViewer2d(
                    self.figure_id, self.new_figure, self.pixels
                ).render(**kwargs)
            else:
                return ImageViewer2d(
                    self.figure_id, self.new_figure, self.pixels
                ).render(**kwargs)
        else:
            raise ValueError("Only 2D images are currently supported")
def view_image_landmarks(
    image,
    channels,
    masked,
    group,
    with_labels,
    without_labels,
    figure_id,
    new_figure,
    interpolation,
    cmap_name,
    alpha,
    render_lines,
    line_colour,
    line_style,
    line_width,
    render_markers,
    marker_style,
    marker_size,
    marker_face_colour,
    marker_edge_colour,
    marker_edge_width,
    render_numbering,
    numbers_horizontal_align,
    numbers_vertical_align,
    numbers_font_name,
    numbers_font_size,
    numbers_font_style,
    numbers_font_weight,
    numbers_font_colour,
    render_legend,
    legend_title,
    legend_font_name,
    legend_font_style,
    legend_font_size,
    legend_font_weight,
    legend_marker_scale,
    legend_location,
    legend_bbox_to_anchor,
    legend_border_axes_pad,
    legend_n_columns,
    legend_horizontal_spacing,
    legend_vertical_spacing,
    legend_border,
    legend_border_padding,
    legend_shadow,
    legend_rounded_corners,
    render_axes,
    axes_font_name,
    axes_font_size,
    axes_font_style,
    axes_font_weight,
    axes_x_limits,
    axes_y_limits,
    axes_x_ticks,
    axes_y_ticks,
    figure_size,
):
    r"""
    This is a helper method that abstracts away the fact that viewing
    images and masked images is identical apart from the mask. Therefore,
    we do the class check in this method and then proceed identically whether
    the image is masked or not.

    See the documentation for _view_2d on Image or _view_2d on MaskedImage
    for information about the parameters.
    """
    import matplotlib.pyplot as plt

    if not image.has_landmarks:
        raise ValueError(
            "Image does not have landmarks attached, unable " "to view landmarks."
        )
    # Parse axes limits: when no explicit limits are given, clamp both the
    # image and the landmark overlay to the image extent.
    image_axes_x_limits = None
    landmarks_axes_x_limits = axes_x_limits
    if axes_x_limits is None:
        image_axes_x_limits = landmarks_axes_x_limits = [0, image.width - 1]
    image_axes_y_limits = None
    landmarks_axes_y_limits = axes_y_limits
    if axes_y_limits is None:
        image_axes_y_limits = landmarks_axes_y_limits = [0, image.height - 1]
    # Render image
    from menpo.image import MaskedImage

    if isinstance(image, MaskedImage):
        # Only the MaskedImage viewer understands the `masked` flag.
        self_view = image.view(
            figure_id=figure_id,
            new_figure=new_figure,
            channels=channels,
            masked=masked,
            interpolation=interpolation,
            cmap_name=cmap_name,
            alpha=alpha,
            render_axes=render_axes,
            axes_x_limits=image_axes_x_limits,
            axes_y_limits=image_axes_y_limits,
        )
    else:
        self_view = image.view(
            figure_id=figure_id,
            new_figure=new_figure,
            channels=channels,
            interpolation=interpolation,
            cmap_name=cmap_name,
            alpha=alpha,
            render_axes=render_axes,
            axes_x_limits=image_axes_x_limits,
            axes_y_limits=image_axes_y_limits,
        )
    # Render landmarks
    # correct group label in legend
    if group is None and image.landmarks.n_groups == 1:
        group = image.landmarks.group_labels[0]
    landmark_view = None  # initialize viewer object
    # useful in order to visualize the legend only for the last axis object
    render_legend_tmp = False
    for i, ax in enumerate(self_view.axes_list):
        # set current axis
        plt.sca(ax)
        # show legend only for the last axis object
        if i == len(self_view.axes_list) - 1:
            render_legend_tmp = render_legend
        # viewer
        landmark_view = image.landmarks[group].view(
            with_labels=with_labels,
            without_labels=without_labels,
            group=group,
            figure_id=self_view.figure_id,
            new_figure=False,
            image_view=True,
            render_lines=render_lines,
            line_colour=line_colour,
            line_style=line_style,
            line_width=line_width,
            render_markers=render_markers,
            marker_style=marker_style,
            marker_size=marker_size,
            marker_face_colour=marker_face_colour,
            marker_edge_colour=marker_edge_colour,
            marker_edge_width=marker_edge_width,
            render_numbering=render_numbering,
            numbers_horizontal_align=numbers_horizontal_align,
            numbers_vertical_align=numbers_vertical_align,
            numbers_font_name=numbers_font_name,
            numbers_font_size=numbers_font_size,
            numbers_font_style=numbers_font_style,
            numbers_font_weight=numbers_font_weight,
            numbers_font_colour=numbers_font_colour,
            render_legend=render_legend_tmp,
            legend_title=legend_title,
            legend_font_name=legend_font_name,
            legend_font_style=legend_font_style,
            legend_font_size=legend_font_size,
            legend_font_weight=legend_font_weight,
            legend_marker_scale=legend_marker_scale,
            legend_location=legend_location,
            legend_bbox_to_anchor=legend_bbox_to_anchor,
            legend_border_axes_pad=legend_border_axes_pad,
            legend_n_columns=legend_n_columns,
            legend_horizontal_spacing=legend_horizontal_spacing,
            legend_vertical_spacing=legend_vertical_spacing,
            legend_border=legend_border,
            legend_border_padding=legend_border_padding,
            legend_shadow=legend_shadow,
            legend_rounded_corners=legend_rounded_corners,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=landmarks_axes_x_limits,
            axes_y_limits=landmarks_axes_y_limits,
            axes_x_ticks=axes_x_ticks,
            axes_y_ticks=axes_y_ticks,
            figure_size=figure_size,
        )
    return landmark_view
class MultipleImageViewer(ImageViewer):
    r"""
    Viewer for a `list` of images sharing the same channel selection and
    mask, abstracting away dimensionality like :map:`ImageViewer`.

    Parameters
    ----------
    figure_id : `object`
        A figure id, or ``None``.
    new_figure : `bool`
        Whether the rendering engine should create a new figure.
    dimensions : {``2``, ``3``} `int`
        The number of dimensions in each image.
    pixels_list : `list` of ``(N, D)`` `ndarray`
        The pixels of each image to render.
    channels : `int` or `list` or ``'all'`` or `None`, optional
        Channel selection, applied identically to every image.
    mask : ``(N, D)`` `ndarray`, optional
        Boolean mask, applied identically to every image.
    """

    def __init__(
        self, figure_id, new_figure, dimensions, pixels_list, channels=None, mask=None
    ):
        # Let the single-image base class set up figure state and the
        # subplot flag from the first image in the list.
        super(MultipleImageViewer, self).__init__(
            figure_id,
            new_figure,
            dimensions,
            pixels_list[0],
            channels=channels,
            mask=mask,
        )
        # Apply the same channel selection and masking to every image.
        pixels_list = [self._parse_channels(channels, p)[0] for p in pixels_list]
        self.pixels_list = [self._masked_pixels(p, mask) for p in pixels_list]

    def render(self, **kwargs):
        r"""
        Render the image list with the appropriate 2D multi-image viewer.

        Parameters
        ----------
        kwargs : `dict`
            Passed through to the image viewer.

        Returns
        -------
        viewer : :map:`Renderer`
            The rendering object.

        Raises
        ------
        ValueError
            Only 2D images are supported.
        """
        if self.dimensions == 2:
            if self.use_subplots:
                # BUG FIX: this branch previously dropped the viewer by not
                # returning it, inconsistent with every other render().
                return MultiImageSubplotsViewer2d(
                    self.figure_id, self.new_figure, self.pixels_list
                ).render(**kwargs)
            else:
                return MultiImageViewer2d(
                    self.figure_id, self.new_figure, self.pixels_list
                ).render(**kwargs)
        else:
            raise ValueError("Only 2D images are currently supported")
def plot_curve(
    x_axis,
    y_axis,
    figure_id=None,
    new_figure=True,
    legend_entries=None,
    title="",
    x_label="",
    y_label="",
    axes_x_limits=0.0,
    axes_y_limits=None,
    axes_x_ticks=None,
    axes_y_ticks=None,
    render_lines=True,
    line_colour=None,
    line_style="-",
    line_width=1,
    render_markers=True,
    marker_style="o",
    marker_size=5,
    marker_face_colour=None,
    marker_edge_colour="k",
    marker_edge_width=1.0,
    render_legend=True,
    legend_title="",
    legend_font_name="sans-serif",
    legend_font_style="normal",
    legend_font_size=10,
    legend_font_weight="normal",
    legend_marker_scale=None,
    legend_location=2,
    legend_bbox_to_anchor=(1.05, 1.0),
    legend_border_axes_pad=None,
    legend_n_columns=1,
    legend_horizontal_spacing=None,
    legend_vertical_spacing=None,
    legend_border=True,
    legend_border_padding=None,
    legend_shadow=False,
    legend_rounded_corners=False,
    render_axes=True,
    axes_font_name="sans-serif",
    axes_font_size=10,
    axes_font_style="normal",
    axes_font_weight="normal",
    figure_size=(7, 7),
    render_grid=True,
    grid_line_style="--",
    grid_line_width=1,
):
    r"""
    Plot a single curve or multiple curves on the same figure.

    Parameters
    ----------
    x_axis : `list` or `array`
        The values of the horizontal axis. They are common for all curves.
    y_axis : `list` of `lists` or `arrays`
        The vertical axis values, one entry per curve. A single curve may
        also be passed directly instead of a one-element `list`.
    figure_id : `object`, optional
        The id of the figure to be used.
    new_figure : `bool`, optional
        If ``True``, a new figure is created.
    legend_entries : `list` of `str` or ``None``, optional
        One name per curve; it must have the same length as `y_axis`. If
        ``None``, the curves are named as `'Curve %d'`.
    title, x_label, y_label : `str`, optional
        The figure title and the labels of the horizontal/vertical axes.
    axes_x_limits, axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
        Axis limits. A `float` sets padding as a percentage of the curves'
        width/height, a `tuple` or `list` sets explicit limits and ``None``
        leaves the limits automatic.
    axes_x_ticks, axes_y_ticks : `list` or `tuple` or ``None``, optional
        The ticks of the corresponding axis.
    render_lines, line_colour, line_style, line_width : optional
        Line options. Each may be a single value applied to every curve or a
        `list` with one value per curve (same length as `y_axis`). If
        `line_colour` is ``None``, the colours are linearly sampled from the
        jet colormap.
    render_markers, marker_style, marker_size, marker_face_colour, marker_edge_colour, marker_edge_width : optional
        Marker options; single values or per-curve `lists`, as above.
        ``None`` colours are linearly sampled from the jet colormap.
    render_legend : `bool`, optional
        If ``True``, the legend is rendered.
    legend_title, legend_font_name, legend_font_style, legend_font_size, legend_font_weight : optional
        Legend text options.
    legend_marker_scale, legend_location, legend_bbox_to_anchor, legend_border_axes_pad, legend_n_columns, legend_horizontal_spacing, legend_vertical_spacing : optional
        Legend placement and spacing options.
    legend_border, legend_border_padding, legend_shadow, legend_rounded_corners : optional
        Legend frame options.
    render_axes, axes_font_name, axes_font_size, axes_font_style, axes_font_weight : optional
        Axes rendering and font options.
    figure_size : (`float`, `float`) or ``None``, optional
        The size of the figure in inches.
    render_grid : `bool`, optional
        If ``True``, the grid is rendered.
    grid_line_style : ``{'-', '--', '-.', ':'}``, optional
        The style of the grid lines.
    grid_line_width : `float`, optional
        The width of the grid lines.

    Returns
    -------
    viewer : :map:`GraphPlotter`
        The viewer object.

    Raises
    ------
    ValueError
        legend_entries list has different length than y_axis list
    """
    from menpo.visualize import GraphPlotter

    # A single curve may be passed directly; normalise to a list of curves.
    if not isinstance(y_axis, list):
        y_axis = [y_axis]
    # When legend entries are given, there must be exactly one per curve.
    if legend_entries is not None and len(legend_entries) != len(y_axis):
        raise ValueError("legend_entries list has different length than y_axis list")
    # Build the plotter first, then delegate all styling to its renderer.
    plotter = GraphPlotter(
        figure_id=figure_id,
        new_figure=new_figure,
        x_axis=x_axis,
        y_axis=y_axis,
        title=title,
        legend_entries=legend_entries,
        x_label=x_label,
        y_label=y_label,
        x_axis_limits=axes_x_limits,
        y_axis_limits=axes_y_limits,
        x_axis_ticks=axes_x_ticks,
        y_axis_ticks=axes_y_ticks,
    )
    return plotter.render(
        render_lines=render_lines,
        line_colour=line_colour,
        line_style=line_style,
        line_width=line_width,
        render_markers=render_markers,
        marker_style=marker_style,
        marker_size=marker_size,
        marker_face_colour=marker_face_colour,
        marker_edge_colour=marker_edge_colour,
        marker_edge_width=marker_edge_width,
        render_legend=render_legend,
        legend_title=legend_title,
        legend_font_name=legend_font_name,
        legend_font_style=legend_font_style,
        legend_font_size=legend_font_size,
        legend_font_weight=legend_font_weight,
        legend_marker_scale=legend_marker_scale,
        legend_location=legend_location,
        legend_bbox_to_anchor=legend_bbox_to_anchor,
        legend_border_axes_pad=legend_border_axes_pad,
        legend_n_columns=legend_n_columns,
        legend_horizontal_spacing=legend_horizontal_spacing,
        legend_vertical_spacing=legend_vertical_spacing,
        legend_border=legend_border,
        legend_border_padding=legend_border_padding,
        legend_shadow=legend_shadow,
        legend_rounded_corners=legend_rounded_corners,
        render_axes=render_axes,
        axes_font_name=axes_font_name,
        axes_font_size=axes_font_size,
        axes_font_style=axes_font_style,
        axes_font_weight=axes_font_weight,
        figure_size=figure_size,
        render_grid=render_grid,
        grid_line_style=grid_line_style,
        grid_line_width=grid_line_width,
    )
def render_rectangles_around_patches(
    centers,
    patch_shape,
    axes=None,
    image_view=True,
    line_colour="r",
    line_style="-",
    line_width=1,
    interpolation="none",
):
    r"""
    Render rectangles of the specified `patch_shape` centered around all the
    points of the provided `centers`.

    Parameters
    ----------
    centers : :map:`PointCloud`
        The centers around which to draw the rectangles.
    patch_shape : `tuple` or `ndarray`, optional
        The size of the rectangle to render.
    axes : `matplotlib.pyplot.axes` object or ``None``, optional
        The axes object on which to render. If ``None``, the current axes
        are used.
    image_view : `bool`, optional
        If ``True`` the rectangles will be viewed as if they are in the
        image coordinate system.
    line_colour : See Below, optional
        The colour of the lines.
        Example options::

            {r, g, b, c, m, k, w}
            or
            (3, ) ndarray

    line_style : ``{-, --, -., :}``, optional
        The style of the lines.
    line_width : `float`, optional
        The width of the lines.
    interpolation : See Below, optional
        In case a patch-based image is already rendered on the specified
        axes, this argument controls how tight the rectangles are to the
        patches. It must have the same value as the one used when rendering
        the patches image, otherwise the rectangles may not lie exactly on
        the patch borders. Example options ::

            {none, nearest, bilinear, bicubic, spline16, spline36,
             hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
             bessel, mitchell, sinc, lanczos}
    """
    import matplotlib.pyplot as plt
    from matplotlib.patches import Rectangle

    # Shorthand -> long style names understood by Rectangle.
    style_names = {"-": "solid", "--": "dashed", "-.": "dashdot", ":": "dotted"}
    if axes is None:
        axes = plt.gca()
    # Half sizes, needed to locate the lower-left corner of each rectangle.
    half_patch_shape = [patch_shape[0] / 2, patch_shape[1] / 2]
    # In image view the first point coordinate corresponds to the vertical
    # axis, so the roles of the two coordinates are swapped.
    xi, yi = (1, 0) if image_view else (0, 1)
    # Offsets that keep each rectangle tight to the rendered patch; they
    # depend on the interpolation used when the patches image was drawn.
    if interpolation == "none":
        off_start, off_end = 0.5, 0.0
    else:
        off_start, off_end = 1.0, 0.5
    # Draw one rectangle per center point.
    for center in centers:
        lower_left = (
            np.intp(center[xi] - half_patch_shape[xi]) - off_start,
            np.intp(center[yi] - half_patch_shape[yi]) - off_start,
        )
        axes.add_patch(
            Rectangle(
                lower_left,
                patch_shape[xi] + off_end,
                patch_shape[yi] + off_end,
                fill=False,
                edgecolor=line_colour,
                linewidth=line_width,
                linestyle=style_names[line_style],
            )
        )
def view_patches(
    patches,
    patch_centers,
    patches_indices=None,
    offset_index=None,
    figure_id=None,
    new_figure=False,
    background="white",
    render_patches=True,
    channels=None,
    interpolation="none",
    cmap_name=None,
    alpha=1.0,
    render_patches_bboxes=True,
    bboxes_line_colour="r",
    bboxes_line_style="-",
    bboxes_line_width=1,
    render_centers=True,
    render_lines=True,
    line_colour=None,
    line_style="-",
    line_width=1,
    render_markers=True,
    marker_style="o",
    marker_size=5,
    marker_face_colour=None,
    marker_edge_colour=None,
    marker_edge_width=1.0,
    render_numbering=False,
    numbers_horizontal_align="center",
    numbers_vertical_align="bottom",
    numbers_font_name="sans-serif",
    numbers_font_size=10,
    numbers_font_style="normal",
    numbers_font_weight="normal",
    numbers_font_colour="k",
    render_axes=False,
    axes_font_name="sans-serif",
    axes_font_size=10,
    axes_font_style="normal",
    axes_font_weight="normal",
    axes_x_limits=None,
    axes_y_limits=None,
    axes_x_ticks=None,
    axes_y_ticks=None,
    figure_size=(7, 7),
):
    r"""
    Method that renders the provided `patches` on a canvas. The user can
    choose whether to render the patch centers (`render_centers`) as well as
    rectangle boundaries around the patches (`render_patches_bboxes`).

    The patches argument can have any of the two formats that are returned
    from the `extract_patches()` and `extract_patches_around_landmarks()`
    methods of the :map:`Image` class. Specifically it can be:

        1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
        2. `list` of ``n_center * n_offset`` :map:`Image` objects

    Parameters
    ----------
    patches : `ndarray` or `list`
        The values of the patches, in either of the two formats above.
    patch_centers : :map:`PointCloud`
        The centers around which to visualize the patches.
    patches_indices : `int` or `list` of `int` or ``None``, optional
        The patches that will be visualized. If ``None``, all the patches
        are selected.
    offset_index : `int` or ``None``, optional
        The offset index within the provided `patches` argument (second
        dimension). If ``None``, ``0`` is used.
    figure_id : `object`, optional
        The id of the figure to be used.
    new_figure : `bool`, optional
        If ``True``, a new figure is created.
    background : ``{'black', 'white'}``, optional
        If ``'black'`` (``'white'``), the background is set equal to the
        minimum (maximum) value of `patches`.
    render_patches : `bool`, optional
        Whether to render the patch values. If ``False``, uniform
        background-coloured patches are rendered instead.
    channels : `int` or `list` of `int` or ``all`` or ``None``, optional
        The channel(s) to be rendered. ``all`` renders every channel in
        subplots; ``None`` renders RGB images in RGB mode and is otherwise
        equivalent to ``all``.
    interpolation : `str`, optional
        The interpolation used to render the image, e.g. ``'none'``,
        ``'nearest'``, ``'bilinear'``, ``'bicubic'``...
    cmap_name : `str`, optional
        If ``None``, single channel and three channel images default to
        greyscale and rgb colormaps respectively.
    alpha : `float`, optional
        The alpha blending value, between 0 (transparent) and 1 (opaque).
    render_patches_bboxes : `bool`, optional
        Whether to render the bounding box lines around the patches.
    bboxes_line_colour, bboxes_line_style, bboxes_line_width : optional
        Style options of the bounding box lines.
    render_centers : `bool`, optional
        Whether to render the patch centers.
    render_lines, line_colour, line_style, line_width : optional
        Style options of the landmark connectivity lines.
    render_markers, marker_style, marker_size, marker_face_colour, marker_edge_colour, marker_edge_width : optional
        Style options of the landmark markers.
    render_numbering, numbers_horizontal_align, numbers_vertical_align, numbers_font_name, numbers_font_size, numbers_font_style, numbers_font_weight, numbers_font_colour : optional
        Landmark numbering options.
    render_axes, axes_font_name, axes_font_size, axes_font_style, axes_font_weight : optional
        Axes rendering and font options.
    axes_x_limits, axes_y_limits : `float` or (`float`, `float`) or ``None``, optional
        Axis limits. A `float` sets padding as a percentage of the shape's
        width/height, a `tuple` or `list` sets explicit limits and ``None``
        leaves the limits automatic.
    axes_x_ticks, axes_y_ticks : `list` or `tuple` or ``None``, optional
        The ticks of the corresponding axis.
    figure_size : (`float`, `float`) `tuple` or ``None``, optional
        The size of the figure in inches.

    Returns
    -------
    viewer : `ImageViewer`
        The image viewing object.

    Raises
    ------
    ValueError
        If `background` is neither ``'black'`` nor ``'white'``.
    """
    from menpo.image.base import (
        _convert_patches_list_to_single_array,
        _create_patches_image,
    )

    # If patches is a list of Image objects, convert it to a single ndarray.
    if isinstance(patches, list):
        patches = _convert_patches_list_to_single_array(patches, patch_centers.n_points)
    # Create the image on which the patches will be rendered.
    if render_patches:
        patches_image = _create_patches_image(
            patches,
            patch_centers,
            patches_indices=patches_indices,
            offset_index=offset_index,
            background=background,
        )
    else:
        # The patch values are not rendered; build uniform 3-channel patches
        # with the background colour and the same spatial size instead.
        if background == "black":
            tmp_patches = np.zeros(
                (
                    patches.shape[0],
                    patches.shape[1],
                    3,
                    patches.shape[3],
                    patches.shape[4],
                )
            )
        elif background == "white":
            tmp_patches = np.ones(
                (
                    patches.shape[0],
                    patches.shape[1],
                    3,
                    patches.shape[3],
                    patches.shape[4],
                )
            )
        else:
            # Previously an unknown value left `tmp_patches` unbound and
            # caused a NameError below; fail early with a clear message.
            raise ValueError("background must be either 'black' or 'white'")
        patches_image = _create_patches_image(
            tmp_patches,
            patch_centers,
            patches_indices=patches_indices,
            offset_index=offset_index,
            background=background,
        )
        channels = None
    # Render the patches image, with or without the patch centers.
    if render_centers:
        patch_view = patches_image.view_landmarks(
            channels=channels,
            group="patch_centers",
            figure_id=figure_id,
            new_figure=new_figure,
            interpolation=interpolation,
            cmap_name=cmap_name,
            alpha=alpha,
            render_lines=render_lines,
            line_colour=line_colour,
            line_style=line_style,
            line_width=line_width,
            render_markers=render_markers,
            marker_style=marker_style,
            marker_size=marker_size,
            marker_face_colour=marker_face_colour,
            marker_edge_colour=marker_edge_colour,
            marker_edge_width=marker_edge_width,
            render_numbering=render_numbering,
            numbers_horizontal_align=numbers_horizontal_align,
            numbers_vertical_align=numbers_vertical_align,
            numbers_font_name=numbers_font_name,
            numbers_font_size=numbers_font_size,
            numbers_font_style=numbers_font_style,
            numbers_font_weight=numbers_font_weight,
            numbers_font_colour=numbers_font_colour,
            render_legend=False,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits,
            axes_x_ticks=axes_x_ticks,
            axes_y_ticks=axes_y_ticks,
            figure_size=figure_size,
        )
    else:
        patch_view = patches_image.view(
            figure_id=figure_id,
            new_figure=new_figure,
            channels=channels,
            interpolation=interpolation,
            cmap_name=cmap_name,
            alpha=alpha,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits,
            axes_x_ticks=axes_x_ticks,
            axes_y_ticks=axes_y_ticks,
            figure_size=figure_size,
        )
    # Draw rectangle boundaries around the rendered patches.
    if render_patches_bboxes:
        patch_shape = [patches.shape[3], patches.shape[4]]
        render_rectangles_around_patches(
            patches_image.landmarks["patch_centers"].points,
            patch_shape,
            image_view=True,
            line_colour=bboxes_line_colour,
            line_style=bboxes_line_style,
            line_width=bboxes_line_width,
            interpolation=interpolation,
        )
    return patch_view
def plot_gaussian_ellipses(
    covariances,
    means,
    n_std=2,
    render_colour_bar=True,
    colour_bar_label="Normalized Standard Deviation",
    colour_map="jet",
    figure_id=None,
    new_figure=False,
    image_view=True,
    line_colour="r",
    line_style="-",
    line_width=1.0,
    render_markers=True,
    marker_edge_colour="k",
    marker_face_colour="k",
    marker_edge_width=1.0,
    marker_size=5,
    marker_style="o",
    render_axes=False,
    axes_font_name="sans-serif",
    axes_font_size=10,
    axes_font_style="normal",
    axes_font_weight="normal",
    crop_proportion=0.1,
    figure_size=(7, 7),
):
    r"""
    Method that renders the Gaussian ellipses that correspond to a set of
    covariance matrices and mean vectors. Naturally, this only works for
    2-dimensional random variables.

    Parameters
    ----------
    covariances : `list` of ``(2, 2)`` `ndarray`
        The covariance matrices that correspond to each ellipse.
    means : `list` of ``(2, )`` `ndarray`
        The mean vectors that correspond to each ellipse.
    n_std : `float`, optional
        The size of the ellipses in terms of number of standard deviations.
    render_colour_bar : `bool`, optional
        If ``True``, the ellipses are coloured based on their normalized
        standard deviations and a colour bar appears on the side. If
        ``False``, all the ellipses have the same (`line_colour`) colour.
    colour_bar_label : `str`, optional
        The title of the colour bar; only applies if `render_colour_bar`
        is ``True``.
    colour_map : `str`, optional
        A valid Matplotlib colour map (see `matplotlib.cm`).
    figure_id : `object`, optional
        The id of the figure to be used.
    new_figure : `bool`, optional
        If ``True``, a new figure is created.
    image_view : `bool`, optional
        If ``True`` the ellipses are rendered in the image coordinate
        system.
    line_colour, line_style, line_width : optional
        Style options of the ellipses' lines.
    render_markers, marker_style, marker_size, marker_face_colour, marker_edge_colour, marker_edge_width : optional
        Style options of the markers at the ellipse centers.
    render_axes, axes_font_name, axes_font_size, axes_font_style, axes_font_weight : optional
        Axes rendering and font options.
    crop_proportion : `float`, optional
        The proportion to be left around the centers' pointcloud.
    figure_size : (`float`, `float`) `tuple` or ``None``, optional
        The size of the figure in inches.

    Raises
    ------
    ValueError
        If `line_style` is not one of ``'-'``, ``'--'``, ``'-.'``, ``':'``.
    """
    import matplotlib.pyplot as plt
    from matplotlib.patches import Ellipse
    import matplotlib.colors as colors
    import matplotlib.cm as cmx
    from matplotlib.font_manager import FontProperties
    from menpo.shape import PointCloud

    def eigh_sorted(cov):
        # Eigen-decomposition with eigenvalues sorted in descending order.
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]

    # Map the matplotlib shorthand line styles to the long names that
    # Ellipse expects.
    try:
        line_style = {
            "-": "solid",
            "--": "dashed",
            "-.": "dashdot",
            ":": "dotted",
        }[line_style]
    except KeyError:
        raise ValueError("line_style must be selected from " "['-', '--', '-.', ':'].")
    # Create a pointcloud from the means so that its viewer can be reused.
    pc = PointCloud(np.array(means))
    # Compute axes limits, leaving crop_proportion padding around the means.
    bounds = pc.bounds()
    r = pc.range()
    x_rr = r[0] * crop_proportion
    y_rr = r[1] * crop_proportion
    axes_x_limits = [bounds[0][1] - x_rr, bounds[1][1] + x_rr]
    axes_y_limits = [bounds[0][0] - y_rr, bounds[1][0] + y_rr]
    normalizer = np.sum(r) / 2.0
    # Compute height, width, rotation angle and normalized std per ellipse.
    stds = []
    heights = []
    widths = []
    thetas = []
    for cov in covariances:
        vals, vecs = eigh_sorted(cov)
        width, height = np.sqrt(vals)
        theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
        stds.append(np.mean([height, width]) / normalizer)
        heights.append(height)
        widths.append(width)
        thetas.append(theta)
    if render_colour_bar:
        # Colormap that maps each normalized std to a colour.
        cmap = plt.get_cmap(colour_map)
        cNorm = colors.Normalize(vmin=np.min(stds), vmax=np.max(stds))
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
    # Visualize the centers' pointcloud. When the colour bar is rendered the
    # markers need per-ellipse colours, so they are drawn manually below.
    if render_colour_bar:
        renderer = pc.view(
            figure_id=figure_id,
            new_figure=new_figure,
            image_view=image_view,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits,
            figure_size=figure_size,
            render_markers=False,
        )
    else:
        renderer = pc.view(
            figure_id=figure_id,
            new_figure=new_figure,
            image_view=image_view,
            marker_edge_colour=marker_edge_colour,
            marker_face_colour=marker_face_colour,
            marker_edge_width=marker_edge_width,
            marker_size=marker_size,
            marker_style=marker_style,
            render_axes=render_axes,
            axes_font_name=axes_font_name,
            axes_font_size=axes_font_size,
            axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits,
            figure_size=figure_size,
            render_markers=render_markers,
        )
    # Plot the ellipses.
    ax = plt.gca()
    for i in range(len(covariances)):
        # Ellipse expects "full" widths/heights, not radii.
        width = 2 * n_std * widths[i]
        height = 2 * n_std * heights[i]
        colour = line_colour
        if render_colour_bar:
            colour = scalarMap.to_rgba(stds[i])
        if render_markers and render_colour_bar:
            # BUGFIX: the original passed 'facecolor'/'edgecolor' to
            # plt.plot, which are not Line2D properties (matplotlib raises
            # "unknown property") and no marker was specified, so nothing
            # could be drawn. Use the marker properties instead so each
            # center is drawn with the colour of its ellipse. In the
            # non-colour-bar case pc.view() has already drawn the markers.
            x, y = (means[i][1], means[i][0]) if image_view else (means[i][0], means[i][1])
            plt.plot(
                x,
                y,
                marker=marker_style,
                markersize=marker_size,
                markerfacecolor=colour,
                markeredgecolor=colour,
                markeredgewidth=marker_edge_width,
                linewidth=0,
            )
        if image_view:
            # Image coordinates: the point coordinates are swapped, so the
            # ellipse width/height are swapped as well.
            ellip = Ellipse(
                xy=means[i][-1::-1],
                width=height,
                height=width,
                angle=thetas[i],
                linestyle=line_style,
                linewidth=line_width,
                edgecolor=colour,
                facecolor="none",
            )
        else:
            ellip = Ellipse(
                xy=means[i],
                width=width,
                height=height,
                angle=thetas[i],
                linestyle=line_style,
                linewidth=line_width,
                edgecolor=colour,
                facecolor="none",
            )
        ax.add_artist(ellip)
    # Show the colour bar and set its label's font properties.
    if render_colour_bar:
        scalarMap.set_array(stds)
        cb = plt.colorbar(scalarMap, label=colour_bar_label)
        ax = cb.ax
        text = ax.yaxis.label
        font = FontProperties(
            size=axes_font_size,
            weight=axes_font_weight,
            style=axes_font_style,
            family=axes_font_name,
        )
        text.set_font_properties(font)
    return renderer
| bsd-3-clause |
usakhelo/FreeCAD | src/Mod/Points/Init.py | 11 | 2234 | # FreeCAD init script of the Points module
# (c) 2005 Juergen Riegel
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2005 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
# Append the open handler
# Register the point-cloud file formats that the Points module can import.
FreeCAD.addImportType("Point formats (*.asc)","Points")
FreeCAD.addImportType("PLY points (*.ply)","Points")
FreeCAD.addImportType("PCD points (*.pcd)","Points")
# Register the same formats for export through the Points module.
FreeCAD.addExportType("Point formats (*.asc)","Points")
FreeCAD.addExportType("PLY points (*.ply)","Points")
FreeCAD.addExportType("PCD points (*.pcd)","Points")
| lgpl-2.1 |
snithish/spark-testing-base | python/sparktestingbase/test/simple_test.py | 10 | 1208 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple test example"""
from sparktestingbase.testcase import SparkTestingBaseTestCase
import unittest2
class SimpleTest(SparkTestingBaseTestCase):
    """A simple test."""

    def test_basic(self):
        """Test a simple collect.

        Parallelizes a one-element list and checks that collect()
        returns it unchanged.
        """
        # Renamed from `input`, which shadowed the builtin of the same name.
        expected = ["hello world"]
        rdd = self.sc.parallelize(expected)
        result = rdd.collect()
        assert result == expected
# Allow running this test module directly through unittest2's CLI runner.
if __name__ == "__main__":
    unittest2.main()
| apache-2.0 |
foreni-packages/webslayer | TextParser.py | 9 | 2989 | #Covered by GPL V2.0
#Coded by Carlos del Ojo Elias (deepbit@gmail.com)
import sys
import re
class TextParser:
    """Line-oriented text scanner over a file, standard input or a string.

    The parser keeps the last line read (with and without its line ending)
    and exposes regex matching over that line via search()/__getitem__().
    Note: this is Python 2 code (print statements, file(), raw_input()).
    """

    def __init__ (self):
        # Backing text and scan offsets, used only when the source is a string.
        self.string=""
        self.oldindex=0
        self.newindex=0
        # Source type: "", "file", "stdin" or "string".
        self.type=""
        # Last line read, including its trailing newline characters.
        self.lastFull_line=None
        # Last line read with the trailing "\r\n" / "\r" / "\n" stripped.
        self.lastline = None
        pass

    def __del__ (self):
        # Release the file handle when the source was a file.
        if self.type=="file":
            self.fd.close()

    def setSource (self,t,*args):
        '''Select the input source.  It can be a file, standard input or a string.
        Examples: setSource("file","/tmp/file")
                  setSource("stdin")\n'''
        if t=="file":
            self.type=t
            self.fd=file(args[0],"r")
        elif t=="stdin":
            # Close any previously opened file before switching source.
            if self.type=="file":
                self.fd.close()
            self.type=t
        elif t=="string":
            if self.type=="file":
                self.fd.close()
            self.type=t
            self.string=args[0]
            # Reset the scan offsets for the new string.
            self.oldindex=0
            self.newindex=0
        else:
            print "Bad argument -- TextParser.setSource()\n"
            sys.exit (-1)

    def seekinit(self):
        # Rewind the string scan offsets back to the beginning.
        self.oldindex=0;
        self.newindex=0;

    def readUntil (self,pattern,caseSens=True):
        "Read lines until *pattern* matches one of them."
        while True:
            if (self.readLine() == 0):
                return False
            if (self.search(pattern,caseSens) == True):
                break
        return True

    def search (self,pattern,caseSens=True,debug=0):
        "Try to match *pattern* against the last line read."
        if not caseSens:
            self.regexp=re.compile(pattern,re.IGNORECASE)
        else:
            self.regexp=re.compile(pattern)
        self.matches=self.regexp.findall(self.lastline)
        # Normalize every match to a tuple so __getitem__ always returns
        # group tuples, whether or not the pattern had capture groups.
        j=0
        for i in self.matches:
            if not type(i)==type(()):
                self.matches[j]=tuple([self.matches[j]])
            j+=1

        # Debug output for matching
        if (debug==1):
            print "[",self.lastline,"-",pattern,"]"
            print len(self.matches)
            print self.matches

        if len(self.matches)==0:
            return False
        else:
            return True

    def __getitem__ (self,key):
        "Access each matched group tuple; built for group patterns, not for a single bare pattern."
        return self.matches[key]

    def skip (self,lines):
        "Skip the number of lines given by the parameter."
        for i in range(lines):
            if (self.readLine() == 0):
                return False
        return True

    def readLine(self):
        "Read the next line, stripping carriage returns; returns the number of bytes read (0 on end of input)."
        if self.type=="file":
            self.lastFull_line=self.fd.readline()
        elif self.type=="stdin":
            # NOTE(review): raw_input() already strips the newline, so
            # lastFull_line has no trailing "\n" for stdin sources.
            self.lastFull_line=raw_input()
        elif self.type=="string":
            # newindex == -1 means the previous read consumed the final line.
            if self.newindex==-1:
                return 0
            if self.oldindex>=0:
                self.newindex=self.string.find("\n",self.oldindex,len(self.string))
                if self.newindex==-1:
                    # No newline left: take the remainder of the string.
                    self.lastFull_line=self.string[self.oldindex:len(self.string)]
                else:
                    # Include the newline itself in the full line.
                    self.lastFull_line=self.string[self.oldindex:self.newindex+1]
                self.oldindex=self.newindex+1
            else:
                self.lastFull_line=''

        bytes_read = len(self.lastFull_line)
        s=self.lastFull_line
        self.lastline=s
        # Strip Windows and bare CR/LF line endings from the cached line.
        if s[-2:] == '\r\n':
            self.lastline = s[:-2]
        elif s[-1:] == '\r' or s[-1:] == '\n':
            self.lastline = s[:-1]

        return bytes_read
| gpl-2.0 |
ajgallegog/gem5_arm | src/arch/x86/isa/insts/general_purpose/string/store_string.py | 91 | 2979 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop STOS_M {
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
st rax, es, [1, t0, rdi]
add rdi, rdi, t3, dataSize=asz
};
def macroop STOS_E_M {
and t0, rcx, rcx, flags=(EZF,), dataSize=asz
br label("end"), flags=(CEZF,)
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
topOfLoop:
st rax, es, [1, t0, rdi]
subi rcx, rcx, 1, flags=(EZF,), dataSize=asz
add rdi, rdi, t3, dataSize=asz
br label("topOfLoop"), flags=(nCEZF,)
end:
fault "NoFault"
};
'''
| bsd-3-clause |
s40523241/2016fallcp_hw | plugin/liquid_tags/graphviz.py | 245 | 3198 | """
GraphViz Tag
---------
This implements a Liquid-style graphviz tag for Pelican. You can use different
Graphviz programs like dot, neato, twopi etc. [1]
[1] http://www.graphviz.org/
Syntax
------
{% graphviz
<program> {
<DOT code>
}
%}
Examples
--------
{% graphviz
dot {
digraph graphname {
a -> b -> c;
b -> d;
}
}
%}
{% graphviz
twopi {
<code goes here>
}
%}
{% graphviz
neato {
<code goes here>
}
%}
...
Output
------
<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div>
"""
import base64
import re
from .mdx_liquid_tags import LiquidTags
# Human-readable usage string included in the ValueError raised on bad input.
SYNTAX = '{% dot graphviz [program] [dot code] %}'

# Captures "<program> { <code> }"; DOTALL lets <code> span lines, and the
# greedy ".*\}" keeps the DOT block's own closing braces inside <code>.
DOT_BLOCK_RE = re.compile(r'^\s*(?P<program>\w+)\s*\{\s*(?P<code>.*\})\s*\}$', re.MULTILINE | re.DOTALL)
def run_graphviz(program, code, options=None, format='png'):
    """Run a Graphviz program on DOT *code* and return the raw image data.

    Copied from https://github.com/tkf/ipython-hierarchymagic/blob/master/hierarchymagic.py

    @param program: graphviz layout program to run (e.g. 'dot', 'neato')
    @param code: DOT source, passed to the program on stdin
    @param options: extra command-line options (default: none); the former
        mutable-list default was replaced, behavior is unchanged
    @param format: output image format passed via -T
    @return: the program's stdout (the rendered image bytes)
    @raise RuntimeError: if the program exits with a nonzero status
    """
    import os
    # BUG FIX: EPIPE/EINVAL were referenced below but never imported,
    # so the error path raised NameError instead of being handled.
    from errno import EPIPE, EINVAL
    from subprocess import Popen, PIPE

    dot_args = [program] + (options or []) + ['-T', format]

    if os.name == 'nt':
        # Avoid opening shell window.
        # * https://github.com/tkf/ipython-hierarchymagic/issues/1
        # * http://stackoverflow.com/a/2935727/727827
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, creationflags=0x08000000)
    else:
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)

    wentwrong = False
    try:
        # Graphviz may close standard input when an error occurs,
        # resulting in a broken pipe on communicate()
        stdout, stderr = p.communicate(code.encode('utf-8'))
    except (OSError, IOError) as err:
        # NOTE: a second "except IOError" clause existed here but was
        # unreachable, since the tuple above already catches IOError.
        if err.errno not in (EPIPE, EINVAL):
            raise
        wentwrong = True

    if wentwrong:
        # in this case, read the standard output and standard error streams
        # directly, to get the error message(s)
        stdout, stderr = p.stdout.read(), p.stderr.read()

    p.wait()
    if p.returncode != 0:
        raise RuntimeError('dot exited with error:\n[stderr]\n{0}'.format(stderr.decode('utf-8')))

    return stdout
@LiquidTags.register('graphviz')
def graphviz_parser(preprocessor, tag, markup):
    """Parse a {% graphviz %} block and render it as an inline base64 PNG."""
    match = DOT_BLOCK_RE.search(markup)
    if not match:
        # Malformed tag body: report the expected syntax to the author.
        raise ValueError('Error processing input. '
                         'Expected syntax: {0}'.format(SYNTAX))

    # Extract the layout program name and the DOT source from the tag body.
    dot_source = match.group('code')
    renderer = match.group('program').strip()

    # Render the graph and embed the PNG directly into the page markup.
    image_bytes = run_graphviz(renderer, dot_source)
    return '<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,%s"></div>' % base64.b64encode(image_bytes)
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| agpl-3.0 |
gmccollister/ansible-virtd-docker-mariadb-bacula-nginx | library/gitlab_hook.py | 2 | 13770 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
# Based on code:
# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_hook
short_description: Manages GitLab project hooks.
description:
- Adds, updates and removes project hook
version_added: "2.6"
author:
- Marcus Watkins (@marwatk)
- Guillaume Martinez (@Lunik)
requirements:
- python >= 2.7
- python-gitlab python module <= 1.12.1
extends_documentation_fragment:
- auth_basic
options:
api_token:
description:
- Gitlab token for logging in.
version_added: "2.8"
type: str
aliases:
- private_token
- access_token
project:
description:
- Id or Full path of the project in the form of group/name
required: true
type: str
hook_url:
description:
- The url that you want GitLab to post to, this is used as the primary key for updates and deletion.
required: true
type: str
state:
description:
- When C(present) the hook will be updated to match the input or created if it doesn't exist. When C(absent) it will be deleted if it exists.
required: true
default: present
type: str
choices: [ "present", "absent" ]
push_events:
description:
- Trigger hook on push events
type: bool
default: yes
issues_events:
description:
- Trigger hook on issues events
type: bool
default: no
merge_requests_events:
description:
- Trigger hook on merge requests events
type: bool
default: no
tag_push_events:
description:
- Trigger hook on tag push events
type: bool
default: no
note_events:
description:
- Trigger hook on note events
type: bool
default: no
job_events:
description:
- Trigger hook on job events
type: bool
default: no
pipeline_events:
description:
- Trigger hook on pipeline events
type: bool
default: no
wiki_page_events:
description:
- Trigger hook on wiki events
type: bool
default: no
hook_validate_certs:
description:
- Whether GitLab will do SSL verification when triggering the hook
type: bool
default: no
aliases: [ enable_ssl_verification ]
token:
description:
- Secret token to validate hook messages at the receiver.
- If this is present it will always result in a change as it cannot be retrieved from GitLab.
- Will show up in the X-Gitlab-Token HTTP request header
required: false
type: str
'''
EXAMPLES = '''
- name: "Adding a project hook"
gitlab_hook:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: "my_group/my_project"
hook_url: "https://my-ci-server.example.com/gitlab-hook"
state: present
push_events: yes
tag_push_events: yes
hook_validate_certs: no
token: "my-super-secret-token-that-my-ci-server-will-check"
- name: "Delete the previous hook"
gitlab_hook:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: "my_group/my_project"
hook_url: "https://my-ci-server.example.com/gitlab-hook"
state: absent
- name: "Delete a hook by numeric project id"
gitlab_hook:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: 10
hook_url: "https://my-ci-server.example.com/gitlab-hook"
state: absent
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Success"
result:
description: json parsed response from the server
returned: always
type: dict
error:
description: the error message returned by the Gitlab API
returned: failed
type: str
sample: "400: path is already in use"
hook:
description: API object
returned: always
type: dict
'''
import os
import re
import traceback
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.gitlab import findProject
class GitLabHook(object):
    """Thin wrapper around python-gitlab for reconciling one project hook."""

    def __init__(self, module, gitlab_instance):
        # AnsibleModule: provides check-mode detection and exit/fail reporting.
        self._module = module
        # Authenticated gitlab.Gitlab connection.
        self._gitlab = gitlab_instance
        # Cached hook object, populated by existsHook() when the hook exists.
        self.hookObject = None

    '''
    @param project Project Object
    @param hook_url Url to call on event
    @param description Description of the group
    @param parent Parent group full path
    '''
    def createOrUpdateHook(self, project, hook_url, options):
        """Create the hook or reconcile an existing one; return True if changed."""
        changed = False

        # existsHook() already ran in main(), so self.hookObject is set
        # exactly when the hook is present on the project.
        if self.hookObject is None:
            hook = self.createHook(project, {
                'url': hook_url,
                'push_events': options['push_events'],
                'issues_events': options['issues_events'],
                'merge_requests_events': options['merge_requests_events'],
                'tag_push_events': options['tag_push_events'],
                'note_events': options['note_events'],
                'job_events': options['job_events'],
                'pipeline_events': options['pipeline_events'],
                'wiki_page_events': options['wiki_page_events'],
                'enable_ssl_verification': options['enable_ssl_verification'],
                'token': options['token']})
            changed = True
        else:
            changed, hook = self.updateHook(self.hookObject, {
                'push_events': options['push_events'],
                'issues_events': options['issues_events'],
                'merge_requests_events': options['merge_requests_events'],
                'tag_push_events': options['tag_push_events'],
                'note_events': options['note_events'],
                'job_events': options['job_events'],
                'pipeline_events': options['pipeline_events'],
                'wiki_page_events': options['wiki_page_events'],
                'enable_ssl_verification': options['enable_ssl_verification'],
                'token': options['token']})

        self.hookObject = hook

        if changed:
            if self._module.check_mode:
                # exit_json terminates the module here in check mode.
                self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url)

            try:
                hook.save()
            except Exception as e:
                self._module.fail_json(msg="Failed to update hook: %s " % e)
            return True
        else:
            return False

    '''
    @param project Project Object
    @param arguments Attributs of the hook
    '''
    def createHook(self, project, arguments):
        """Create the hook via the API; skipped (returns True) in check mode."""
        if self._module.check_mode:
            # NOTE(review): returns True rather than a hook object in check
            # mode; the caller exits before using it, but confirm upstream.
            return True

        hook = project.hooks.create(arguments)

        return hook

    '''
    @param hook Hook Object
    @param arguments Attributs of the hook
    '''
    def updateHook(self, hook, arguments):
        """Apply differing non-None attributes to *hook*; return (changed, hook)."""
        changed = False

        for arg_key, arg_value in arguments.items():
            if arguments[arg_key] is not None:
                # The token cannot be read back from the API, so any supplied
                # token is always treated as a change.
                if arg_key == "token" or getattr(hook, arg_key) != arguments[arg_key]:
                    setattr(hook, arg_key, arguments[arg_key])
                    changed = True

        return (changed, hook)

    '''
    @param project Project object
    @param hook_url Url to call on event
    '''
    def findHook(self, project, hook_url):
        """Return the project's hook whose url equals *hook_url*, or None."""
        hooks = project.hooks.list()
        for hook in hooks:
            if (hook.url == hook_url):
                return hook

    '''
    @param project Project object
    @param hook_url Url to call on event
    '''
    def existsHook(self, project, hook_url):
        # When the hook exists, the object is cached in self.hookObject.
        hook = self.findHook(project, hook_url)
        if hook:
            self.hookObject = hook
            return True
        return False

    def deleteHook(self):
        """Delete the cached hook; no-op (returns True) in check mode."""
        if self._module.check_mode:
            return True

        return self.hookObject.delete()
def deprecation_warning(module):
    """Emit deprecation notices for the legacy option aliases of this module."""
    # NOTE(review): module.params contains every declared option key, so this
    # membership test appears to be always true -- confirm intent upstream.
    for legacy_alias in ('private_token', 'access_token', 'enable_ssl_verification'):
        if legacy_alias in module.params:
            module.deprecate("Alias \'{aliase}\' is deprecated".format(aliase=legacy_alias), "2.10")
def main():
    """Ansible entry point: parse options, connect to GitLab, reconcile the hook."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        api_token=dict(type='str', no_log=True, aliases=["private_token", "access_token"]),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        project=dict(type='str', required=True),
        hook_url=dict(type='str', required=True),
        push_events=dict(type='bool', default=True),
        issues_events=dict(type='bool', default=False),
        merge_requests_events=dict(type='bool', default=False),
        tag_push_events=dict(type='bool', default=False),
        note_events=dict(type='bool', default=False),
        job_events=dict(type='bool', default=False),
        pipeline_events=dict(type='bool', default=False),
        wiki_page_events=dict(type='bool', default=False),
        hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
        token=dict(type='str', no_log=True),
    ))

    # Authentication is either username+password or a token, never both.
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['api_username', 'api_token'],
            ['api_password', 'api_token']
        ],
        required_together=[
            ['api_username', 'api_password']
        ],
        required_one_of=[
            ['api_username', 'api_token']
        ],
        supports_check_mode=True,
    )

    deprecation_warning(module)

    # Strip any "/api/..." suffix the user may have included in api_url.
    gitlab_url = re.sub('/api.*', '', module.params['api_url'])
    validate_certs = module.params['validate_certs']
    gitlab_user = module.params['api_username']
    gitlab_password = module.params['api_password']
    gitlab_token = module.params['api_token']

    state = module.params['state']
    project_identifier = module.params['project']
    hook_url = module.params['hook_url']
    push_events = module.params['push_events']
    issues_events = module.params['issues_events']
    merge_requests_events = module.params['merge_requests_events']
    tag_push_events = module.params['tag_push_events']
    note_events = module.params['note_events']
    job_events = module.params['job_events']
    pipeline_events = module.params['pipeline_events']
    wiki_page_events = module.params['wiki_page_events']
    enable_ssl_verification = module.params['hook_validate_certs']
    hook_token = module.params['token']

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)

    try:
        gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
                                        private_token=gitlab_token, api_version=4)
        gitlab_instance.auth()
    except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
        module.fail_json(msg="Failed to connect to Gitlab server: %s" % to_native(e))
    except (gitlab.exceptions.GitlabHttpError) as e:
        module.fail_json(msg="Failed to connect to Gitlab server: %s. \
            Gitlab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))

    gitlab_hook = GitLabHook(module, gitlab_instance)

    project = findProject(gitlab_instance, project_identifier)

    if project is None:
        module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier)

    # Caches the hook on gitlab_hook.hookObject when it already exists.
    hook_exists = gitlab_hook.existsHook(project, hook_url)

    if state == 'absent':
        if hook_exists:
            gitlab_hook.deleteHook()
            module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
        else:
            module.exit_json(changed=False, msg="Hook deleted or does not exists")

    if state == 'present':
        if gitlab_hook.createOrUpdateHook(project, hook_url, {
                                          "push_events": push_events,
                                          "issues_events": issues_events,
                                          "merge_requests_events": merge_requests_events,
                                          "tag_push_events": tag_push_events,
                                          "note_events": note_events,
                                          "job_events": job_events,
                                          "pipeline_events": pipeline_events,
                                          "wiki_page_events": wiki_page_events,
                                          "enable_ssl_verification": enable_ssl_verification,
                                          "token": hook_token}):

            module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
        else:
            module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-2.0 |
pavel-odintsov/shogun | examples/undocumented/python_modular/serialization_string_kernels_modular.py | 15 | 5656 | #!/usr/bin/env python
from modshogun import WeightedDegreeStringKernel, LinearKernel, PolyKernel, GaussianKernel, CTaxonomy
from modshogun import CombinedKernel, WeightedDegreeRBFKernel
from modshogun import StringCharFeatures, RealFeatures, CombinedFeatures, StringWordFeatures, SortWordString
from modshogun import DNA, PROTEIN, Labels
from modshogun import WeightedDegreeStringKernel, CombinedKernel, WeightedCommWordStringKernel, WeightedDegreePositionStringKernel
from modshogun import StringCharFeatures, DNA, StringWordFeatures, CombinedFeatures
from modshogun import MSG_DEBUG
from modshogun import RealFeatures, BinaryLabels, DNA, Alphabet
from modshogun import WeightedDegreeStringKernel, GaussianKernel
from modshogun import SVMLight
from numpy import concatenate, ones
from numpy.random import randn, seed
import numpy
import sys
import types
import random
import bz2
import pickle
import inspect
###################################################
# Random Data
###################################################
def generate_random_string(length, number):
    """Return *number* random DNA strings, each *length* characters long.

    Characters are drawn one at a time with random.choice from "AGTC",
    so seeding the random module beforehand makes the output reproducible.
    """
    alphabet = "AGTC"
    return ["".join(random.choice(alphabet) for _ in range(length))
            for _ in range(number)]
def generate_random_data(number):
    """Create *number* random 22-mer DNA examples with random +/-1.0 labels."""
    # Labels are drawn first so the random stream layout matches the original.
    label_values = [random.choice([-1.0, 1.0]) for _ in range(number)]
    labels = numpy.array(label_values)
    examples = numpy.array(generate_random_string(22, number))

    return examples, labels
def save(filename, myobj):
    """
    save object to file using pickle

    @param filename: name of destination file
    @type filename: str
    @param myobj: object to save (has to be pickleable)
    @type myobj: obj
    """
    try:
        f = bz2.BZ2File(filename, 'wb')
    except IOError as details:
        # Best-effort: report the problem on stderr and return quietly,
        # matching the original behaviour of not raising here.
        sys.stderr.write('File ' + filename + ' cannot be written\n')
        # BUG FIX: write() requires a string; passing the exception object
        # itself raised a TypeError in this error path.
        sys.stderr.write(str(details) + '\n')
        return

    try:
        # Protocol 2 keeps the file readable by Python 2 consumers.
        pickle.dump(myobj, f, protocol=2)
    finally:
        # Always release the file handle, even if pickling fails.
        f.close()
def load(filename):
    """
    Load from filename using pickle

    @param filename: name of file to load from
    @type filename: str
    @return: the unpickled object, or None when the file cannot be read
    """
    try:
        f = bz2.BZ2File(filename, 'rb')
    except IOError as details:
        # Best-effort: report the problem on stderr and return None,
        # matching the original behaviour of not raising here.
        sys.stderr.write('File ' + filename + ' cannot be read\n')
        # BUG FIX: write() requires a string; passing the exception object
        # itself raised a TypeError in this error path.
        sys.stderr.write(str(details) + '\n')
        return None

    try:
        myobj = pickle.load(f)
    finally:
        # Always release the file handle, even if unpickling fails.
        f.close()
    return myobj
def get_spectrum_features(data, order=3, gap=0, reverse=True):
    """Build sorted word features over *data* for use with a spectrum kernel."""
    char_feat = StringCharFeatures(data, DNA)
    word_feat = StringWordFeatures(char_feat.get_alphabet())
    word_feat.obtain_from_char(char_feat, order - 1, order, gap, reverse)

    # Sorting the word features is required by the Comm*String kernels.
    sorter = SortWordString()
    sorter.init(word_feat)
    word_feat.add_preprocessor(sorter)
    word_feat.apply_preprocessor()

    return word_feat
def get_wd_features(data, feat_type="dna"):
    """Wrap raw strings in a StringCharFeatures object for the WD kernel."""
    alphabet_by_type = {"dna": DNA, "protein": PROTEIN}
    if feat_type not in alphabet_by_type:
        raise Exception("unknown feature type")

    feat = StringCharFeatures(alphabet_by_type[feat_type])
    feat.set_features(data)
    return feat
def construct_features(features):
    """Combine WD features on full strings with spectrum features on both halves."""
    full_strings = [seq for seq in features]
    left_halves = [seq[0:15] for seq in features]
    right_halves = [seq[15:] for seq in features]

    # Feature objects are built in the same order as before, then combined.
    wd_feats = get_wd_features(full_strings)
    left_spectrum = get_spectrum_features(left_halves, order=3)
    right_spectrum = get_spectrum_features(right_halves, order=3)

    combined = CombinedFeatures()
    for feature_obj in (wd_feats, left_spectrum, right_spectrum):
        combined.append_feature_obj(feature_obj)

    return combined
parameter_list = [[200, 1, 100]]
def serialization_string_kernels_modular(n_data, num_shifts, size):
    """Train an SVMLight with a combined string kernel, serialize it,
    reload it and verify both models predict alike.

    @param n_data: number of random training/test examples to generate
    @param num_shifts: shift allowance for the WD position kernel
    @param size: kernel cache size
    @return: (predictions of original svm, predictions of reloaded svm)
    """

    ##################################################
    # set up toy data and svm

    train_xt, train_lt = generate_random_data(n_data)
    test_xt, test_lt = generate_random_data(n_data)

    feats_train = construct_features(train_xt)
    feats_test = construct_features(test_xt)

    max_len = len(train_xt[0])
    kernel_wdk = WeightedDegreePositionStringKernel(size, 5)
    # Allow the same shift budget at every sequence position.
    shifts_vector = numpy.ones(max_len, dtype=numpy.int32)*num_shifts
    kernel_wdk.set_shifts(shifts_vector)

    ########
    # set up spectrum
    use_sign = False
    kernel_spec_1 = WeightedCommWordStringKernel(size, use_sign)
    kernel_spec_2 = WeightedCommWordStringKernel(size, use_sign)

    ########
    # combined kernel
    kernel = CombinedKernel()
    kernel.append_kernel(kernel_wdk)
    kernel.append_kernel(kernel_spec_1)
    kernel.append_kernel(kernel_spec_2)

    # init kernel
    labels = BinaryLabels(train_lt)
    svm = SVMLight(1.0, kernel, labels)
    #svm.io.set_loglevel(MSG_DEBUG)
    svm.train(feats_train)

    ##################################################
    # serialize to file
    fn = "serialized_svm.bz2"
    #print("serializing SVM to file", fn)
    save(fn, svm)

    ##################################################
    # unserialize and sanity check

    #print("unserializing SVM")
    svm2 = load(fn)

    #print("comparing predictions")
    out = svm.apply(feats_test).get_labels()
    out2 = svm2.apply(feats_test).get_labels()

    # BUG FIX: the comparison must happen outside abs(); the original
    # "assert abs(out[i] - out2[i] < eps)" took abs() of a boolean, so a
    # large negative difference (out[i] << out2[i]) passed unnoticed.
    for i in range(len(out)):
        assert abs(out[i] - out2[i]) < 0.000001
    #print("all checks passed.")

    return out,out2
# Run the example with the first (default) parameter set when executed directly.
if __name__=='__main__':
    serialization_string_kernels_modular(*parameter_list[0])
| gpl-3.0 |
paradox41/heroprotocol | heroprotocol/protocols/protocol45635.py | 8 | 26828 | # Copyright (c) 2015 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from heroprotocol.decoders import *
# Decoding instructions for each protocol type.
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,5)]), #2
('_int',[(0,6)]), #3
('_int',[(0,14)]), #4
('_int',[(0,22)]), #5
('_int',[(0,32)]), #6
('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
('_struct',[[('m_userId',2,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
('_int',[(0,3)]), #12
('_bool',[]), #13
('_array',[(16,0),10]), #14
('_optional',[14]), #15
('_blob',[(16,0)]), #16
('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6),('m_fixedFileHash',17,7)]]), #18
('_fourcc',[]), #19
('_blob',[(0,7)]), #20
('_int',[(0,64)]), #21
('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23
('_int',[(0,2)]), #24
('_optional',[10]), #25
('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26
('_array',[(0,5),26]), #27
('_optional',[27]), #28
('_blob',[(0,10)]), #29
('_blob',[(0,11)]), #30
('_struct',[[('m_file',30,0)]]), #31
('_optional',[13]), #32
('_int',[(-9223372036854775808,64)]), #33
('_blob',[(0,12)]), #34
('_blob',[(40,0)]), #35
('_array',[(0,6),35]), #36
('_optional',[36]), #37
('_array',[(0,6),30]), #38
('_optional',[38]), #39
('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',32,16),('m_timeUTC',33,5),('m_timeLocalOffset',33,6),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40
('_optional',[9]), #41
('_optional',[35]), #42
('_optional',[6]), #43
('_struct',[[('m_race',25,-1)]]), #44
('_struct',[[('m_team',25,-1)]]), #45
('_blob',[(0,9)]), #46
('_struct',[[('m_name',9,-18),('m_clanTag',41,-17),('m_clanLogo',42,-16),('m_highestLeague',25,-15),('m_combinedRaceLevels',43,-14),('m_randomSeed',6,-13),('m_racePreference',44,-12),('m_teamPreference',45,-11),('m_testMap',13,-10),('m_testAuto',13,-9),('m_examine',13,-8),('m_customInterface',13,-7),('m_testType',6,-6),('m_observe',24,-5),('m_hero',46,-4),('m_skin',46,-3),('m_mount',46,-2),('m_toonHandle',20,-1)]]), #47
('_array',[(0,5),47]), #48
('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_competitive',13,-10),('m_practice',13,-9),('m_cooperative',13,-8),('m_noVictoryOrDefeat',13,-7),('m_heroDuplicatesAllowed',13,-6),('m_fog',24,-5),('m_observers',24,-4),('m_userDifficulty',24,-3),('m_clientDebugFlags',21,-2),('m_ammId',43,-1)]]), #49
('_int',[(1,4)]), #50
('_int',[(1,8)]), #51
('_bitarray',[(0,6)]), #52
('_bitarray',[(0,8)]), #53
('_bitarray',[(0,2)]), #54
('_bitarray',[(0,7)]), #55
('_struct',[[('m_allowedColors',52,-6),('m_allowedRaces',53,-5),('m_allowedDifficulty',52,-4),('m_allowedControls',53,-3),('m_allowedObserveTypes',54,-2),('m_allowedAIBuilds',55,-1)]]), #56
('_array',[(0,5),56]), #57
('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',29,-25),('m_gameOptions',49,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',50,-18),('m_maxColors',3,-17),('m_maxRaces',51,-16),('m_maxControls',10,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',30,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',57,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',36,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #58
('_optional',[1]), #59
('_optional',[2]), #60
('_struct',[[('m_color',60,-1)]]), #61
('_array',[(0,4),46]), #62
('_array',[(0,17),6]), #63
('_array',[(0,9),6]), #64
('_struct',[[('m_control',10,-20),('m_userId',59,-19),('m_teamId',1,-18),('m_colorPref',61,-17),('m_racePref',44,-16),('m_difficulty',3,-15),('m_aiBuild',0,-14),('m_handicap',0,-13),('m_observe',24,-12),('m_logoIndex',6,-11),('m_hero',46,-10),('m_skin',46,-9),('m_mount',46,-8),('m_artifacts',62,-7),('m_workingSetSlotId',25,-6),('m_rewards',63,-5),('m_toonHandle',20,-4),('m_licenses',64,-3),('m_tandemLeaderUserId',59,-2),('m_hasSilencePenalty',13,-1)]]), #65
('_array',[(0,5),65]), #66
('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',66,-8),('m_randomSeed',6,-7),('m_hostUserId',59,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #67
('_struct',[[('m_userInitialData',48,-3),('m_gameDescription',58,-2),('m_lobbyState',67,-1)]]), #68
('_struct',[[('m_syncLobbyState',68,-1)]]), #69
('_struct',[[('m_name',20,-1)]]), #70
('_blob',[(0,6)]), #71
('_struct',[[('m_name',71,-1)]]), #72
('_struct',[[('m_name',71,-3),('m_type',6,-2),('m_data',20,-1)]]), #73
('_struct',[[('m_type',6,-3),('m_name',71,-2),('m_data',34,-1)]]), #74
('_array',[(0,5),10]), #75
('_struct',[[('m_signature',75,-2),('m_toonHandle',20,-1)]]), #76
('_struct',[[('m_gameFullyDownloaded',13,-14),('m_developmentCheatsEnabled',13,-13),('m_testCheatsEnabled',13,-12),('m_multiplayerCheatsEnabled',13,-11),('m_syncChecksummingEnabled',13,-10),('m_isMapToMapTransition',13,-9),('m_debugPauseEnabled',13,-8),('m_useGalaxyAsserts',13,-7),('m_platformMac',13,-6),('m_cameraFollow',13,-5),('m_baseBuildNum',6,-4),('m_buildNum',6,-3),('m_versionFlags',6,-2),('m_hotkeyProfile',46,-1)]]), #77
('_struct',[[]]), #78
('_int',[(0,16)]), #79
('_struct',[[('x',79,-2),('y',79,-1)]]), #80
('_struct',[[('m_which',12,-2),('m_target',80,-1)]]), #81
('_struct',[[('m_fileName',30,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',29,-1)]]), #82
('_int',[(1,32)]), #83
('_struct',[[('m_sequence',83,-1)]]), #84
('_null',[]), #85
('_int',[(0,20)]), #86
('_int',[(-2147483648,32)]), #87
('_struct',[[('x',86,-3),('y',86,-2),('z',87,-1)]]), #88
('_struct',[[('m_targetUnitFlags',79,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',79,-4),('m_snapshotControlPlayerId',59,-3),('m_snapshotUpkeepPlayerId',59,-2),('m_snapshotPoint',88,-1)]]), #89
('_choice',[(0,2),{0:('None',85),1:('TargetPoint',88),2:('TargetUnit',89)}]), #90
('_struct',[[('m_target',90,-4),('m_time',87,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #91
('_struct',[[('m_data',91,-1)]]), #92
('_int',[(0,26)]), #93
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #94
('_optional',[94]), #95
('_choice',[(0,2),{0:('None',85),1:('TargetPoint',88),2:('TargetUnit',89),3:('Data',6)}]), #96
('_optional',[88]), #97
('_struct',[[('m_cmdFlags',93,-7),('m_abil',95,-6),('m_data',96,-5),('m_vector',97,-4),('m_sequence',83,-3),('m_otherUnit',43,-2),('m_unitGroup',43,-1)]]), #98
('_int',[(0,9)]), #99
('_bitarray',[(0,9)]), #100
('_array',[(0,9),99]), #101
('_choice',[(0,2),{0:('None',85),1:('Mask',100),2:('OneIndices',101),3:('ZeroIndices',101)}]), #102
('_struct',[[('m_unitLink',79,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',99,-1)]]), #103
('_array',[(0,9),103]), #104
('_struct',[[('m_subgroupIndex',99,-4),('m_removeMask',102,-3),('m_addSubgroups',104,-2),('m_addUnitTags',64,-1)]]), #105
('_struct',[[('m_controlGroupId',1,-2),('m_delta',105,-1)]]), #106
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',12,-2),('m_mask',102,-1)]]), #107
('_struct',[[('m_count',99,-6),('m_subgroupCount',99,-5),('m_activeSubgroupIndex',99,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #108
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',108,-1)]]), #109
('_struct',[[('m_chatMessage',29,-1)]]), #110
('_struct',[[('m_speed',12,-1)]]), #111
('_int',[(-128,8)]), #112
('_struct',[[('m_delta',112,-1)]]), #113
('_struct',[[('x',87,-2),('y',87,-1)]]), #114
('_struct',[[('m_point',114,-4),('m_unit',6,-3),('m_pingedMinimap',13,-2),('m_option',87,-1)]]), #115
('_struct',[[('m_verb',29,-2),('m_arguments',29,-1)]]), #116
('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #117
('_struct',[[('m_unitTag',6,-1)]]), #118
('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #119
('_struct',[[('m_conversationId',87,-2),('m_replyId',87,-1)]]), #120
('_optional',[20]), #121
('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',121,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #122
('_array',[(0,5),122]), #123
('_int',[(0,1)]), #124
('_struct',[[('m_userInfos',123,-2),('m_method',124,-1)]]), #125
('_choice',[(0,3),{0:('None',85),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',87),4:('TextChanged',30),5:('MouseButton',6)}]), #126
('_struct',[[('m_controlId',87,-3),('m_eventType',87,-2),('m_eventData',126,-1)]]), #127
('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #128
('_array',[(0,7),6]), #129
('_struct',[[('m_soundHash',129,-2),('m_length',129,-1)]]), #130
('_struct',[[('m_syncInfo',130,-1)]]), #131
('_struct',[[('m_queryId',79,-3),('m_lengthMs',6,-2),('m_finishGameLoop',6,-1)]]), #132
('_struct',[[('m_queryId',79,-2),('m_lengthMs',6,-1)]]), #133
('_struct',[[('m_animWaitQueryId',79,-1)]]), #134
('_struct',[[('m_sound',6,-1)]]), #135
('_struct',[[('m_transmissionId',87,-2),('m_thread',6,-1)]]), #136
('_struct',[[('m_transmissionId',87,-1)]]), #137
('_optional',[80]), #138
('_optional',[79]), #139
('_optional',[112]), #140
('_struct',[[('m_target',138,-6),('m_distance',139,-5),('m_pitch',139,-4),('m_yaw',139,-3),('m_reason',140,-2),('m_follow',13,-1)]]), #141
('_struct',[[('m_skipType',124,-1)]]), #142
('_int',[(0,11)]), #143
('_struct',[[('x',143,-2),('y',143,-1)]]), #144
('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',144,-3),('m_posWorld',88,-2),('m_flags',112,-1)]]), #145
('_struct',[[('m_posUI',144,-3),('m_posWorld',88,-2),('m_flags',112,-1)]]), #146
('_struct',[[('m_achievementLink',79,-1)]]), #147
('_struct',[[('m_hotkey',6,-2),('m_down',13,-1)]]), #148
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_state',112,-1)]]), #149
('_struct',[[('m_soundtrack',6,-1)]]), #150
('_struct',[[('m_key',112,-2),('m_flags',112,-1)]]), #151
('_struct',[[('m_error',87,-2),('m_abil',95,-1)]]), #152
('_int',[(0,19)]), #153
('_struct',[[('m_decrementMs',153,-1)]]), #154
('_struct',[[('m_portraitId',87,-1)]]), #155
('_struct',[[('m_functionName',20,-1)]]), #156
('_struct',[[('m_result',87,-1)]]), #157
('_struct',[[('m_gameMenuItemIndex',87,-1)]]), #158
('_int',[(-32768,16)]), #159
('_struct',[[('m_wheelSpin',159,-2),('m_flags',112,-1)]]), #160
('_struct',[[('m_button',79,-1)]]), #161
('_struct',[[('m_cutsceneId',87,-2),('m_bookmarkName',20,-1)]]), #162
('_struct',[[('m_cutsceneId',87,-1)]]), #163
('_struct',[[('m_cutsceneId',87,-3),('m_conversationLine',20,-2),('m_altConversationLine',20,-1)]]), #164
('_struct',[[('m_cutsceneId',87,-2),('m_conversationLine',20,-1)]]), #165
('_struct',[[('m_leaveReason',1,-1)]]), #166
('_struct',[[('m_observe',24,-7),('m_name',9,-6),('m_toonHandle',121,-5),('m_clanTag',41,-4),('m_clanLogo',42,-3),('m_hijack',13,-2),('m_hijackCloneGameUserId',59,-1)]]), #167
('_optional',[83]), #168
('_struct',[[('m_state',24,-2),('m_sequence',168,-1)]]), #169
('_struct',[[('m_sequence',168,-2),('m_target',88,-1)]]), #170
('_struct',[[('m_sequence',168,-2),('m_target',89,-1)]]), #171
('_struct',[[('m_catalog',10,-4),('m_entry',79,-3),('m_field',9,-2),('m_value',9,-1)]]), #172
('_struct',[[('m_index',6,-1)]]), #173
('_struct',[[('m_shown',13,-1)]]), #174
('_struct',[[('m_recipient',12,-2),('m_string',30,-1)]]), #175
('_struct',[[('m_recipient',12,-2),('m_point',114,-1)]]), #176
('_struct',[[('m_progress',87,-1)]]), #177
('_struct',[[('m_status',24,-1)]]), #178
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_buttonLink',79,-1)]]), #179
('_struct',[[('m_behaviorLink',79,-2),('m_buttonLink',79,-1)]]), #180
('_choice',[(0,2),{0:('None',85),1:('Ability',179),2:('Behavior',180),3:('Vitals',159)}]), #181
('_struct',[[('m_announcement',181,-3),('m_otherUnitTag',6,-2),('m_unitTag',6,-1)]]), #182
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #183
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_x',10,2),('m_y',10,3)]]), #184
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',59,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #185
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #186
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #187
('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',87,2)]]), #188
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #189
('_array',[(0,10),87]), #190
('_struct',[[('m_firstUnitIndex',6,0),('m_items',190,1)]]), #191
('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #192
('_struct',[[('m_key',29,0)]]), #193
('_struct',[[('__parent',193,0),('m_value',29,1)]]), #194
('_array',[(0,6),194]), #195
('_optional',[195]), #196
('_struct',[[('__parent',193,0),('m_value',87,1)]]), #197
('_array',[(0,6),197]), #198
('_optional',[198]), #199
('_struct',[[('m_eventName',29,0),('m_stringData',196,1),('m_intData',199,2),('m_fixedData',199,3)]]), #200
('_struct',[[('m_value',6,0),('m_time',6,1)]]), #201
('_array',[(0,6),201]), #202
('_array',[(0,5),202]), #203
('_struct',[[('m_name',29,0),('m_values',203,1)]]), #204
('_array',[(0,21),204]), #205
('_struct',[[('m_instanceList',205,0)]]), #206
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (78, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (77, 'NNet.Game.SUserOptionsEvent'),
9: (70, 'NNet.Game.SBankFileEvent'),
10: (72, 'NNet.Game.SBankSectionEvent'),
11: (73, 'NNet.Game.SBankKeyEvent'),
12: (74, 'NNet.Game.SBankValueEvent'),
13: (76, 'NNet.Game.SBankSignatureEvent'),
14: (81, 'NNet.Game.SCameraSaveEvent'),
21: (82, 'NNet.Game.SSaveGameEvent'),
22: (78, 'NNet.Game.SSaveGameDoneEvent'),
23: (78, 'NNet.Game.SLoadGameDoneEvent'),
25: (84, 'NNet.Game.SCommandManagerResetEvent'),
26: (92, 'NNet.Game.SGameCheatEvent'),
27: (98, 'NNet.Game.SCmdEvent'),
28: (106, 'NNet.Game.SSelectionDeltaEvent'),
29: (107, 'NNet.Game.SControlGroupUpdateEvent'),
30: (109, 'NNet.Game.SSelectionSyncCheckEvent'),
32: (110, 'NNet.Game.STriggerChatMessageEvent'),
34: (111, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (113, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (115, 'NNet.Game.STriggerPingEvent'),
37: (116, 'NNet.Game.SBroadcastCheatEvent'),
38: (117, 'NNet.Game.SAllianceEvent'),
39: (118, 'NNet.Game.SUnitClickEvent'),
40: (119, 'NNet.Game.SUnitHighlightEvent'),
41: (120, 'NNet.Game.STriggerReplySelectedEvent'),
43: (125, 'NNet.Game.SHijackReplayGameEvent'),
44: (78, 'NNet.Game.STriggerSkippedEvent'),
45: (128, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (135, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (136, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (137, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (141, 'NNet.Game.SCameraUpdateEvent'),
50: (78, 'NNet.Game.STriggerAbortMissionEvent'),
55: (127, 'NNet.Game.STriggerDialogControlEvent'),
56: (131, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (142, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (145, 'NNet.Game.STriggerMouseClickedEvent'),
59: (146, 'NNet.Game.STriggerMouseMovedEvent'),
60: (147, 'NNet.Game.SAchievementAwardedEvent'),
61: (148, 'NNet.Game.STriggerHotkeyPressedEvent'),
62: (149, 'NNet.Game.STriggerTargetModeUpdateEvent'),
64: (150, 'NNet.Game.STriggerSoundtrackDoneEvent'),
66: (151, 'NNet.Game.STriggerKeyPressedEvent'),
67: (156, 'NNet.Game.STriggerMovieFunctionEvent'),
76: (152, 'NNet.Game.STriggerCommandErrorEvent'),
86: (78, 'NNet.Game.STriggerMovieStartedEvent'),
87: (78, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (154, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (155, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (157, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (158, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (160, 'NNet.Game.STriggerMouseWheelEvent'),
95: (161, 'NNet.Game.STriggerButtonPressedEvent'),
96: (78, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (162, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (163, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (164, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (165, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
101: (166, 'NNet.Game.SGameUserLeaveEvent'),
102: (167, 'NNet.Game.SGameUserJoinEvent'),
103: (169, 'NNet.Game.SCommandManagerStateEvent'),
104: (170, 'NNet.Game.SCmdUpdateTargetPointEvent'),
105: (171, 'NNet.Game.SCmdUpdateTargetUnitEvent'),
106: (132, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'),
107: (133, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'),
108: (134, 'NNet.Game.STriggerAnimOffsetEvent'),
109: (172, 'NNet.Game.SCatalogModifyEvent'),
110: (173, 'NNet.Game.SHeroTalentTreeSelectedEvent'),
111: (78, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'),
112: (174, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (175, 'NNet.Game.SChatMessage'),
1: (176, 'NNet.Game.SPingMessage'),
2: (177, 'NNet.Game.SLoadingProgressMessage'),
3: (78, 'NNet.Game.SServerPingMessage'),
4: (178, 'NNet.Game.SReconnectNotifyMessage'),
5: (182, 'NNet.Game.SPlayerAnnounceMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
tracker_event_types = {
1: (183, 'NNet.Replay.Tracker.SUnitBornEvent'),
2: (185, 'NNet.Replay.Tracker.SUnitDiedEvent'),
3: (186, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
4: (187, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
5: (188, 'NNet.Replay.Tracker.SUpgradeEvent'),
6: (183, 'NNet.Replay.Tracker.SUnitInitEvent'),
7: (189, 'NNet.Replay.Tracker.SUnitDoneEvent'),
8: (191, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
9: (192, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
10: (200, 'NNet.Replay.Tracker.SStatGameEvent'),
11: (206, 'NNet.Replay.Tracker.SScoreResultEvent'),
12: (184, 'NNet.Replay.Tracker.SUnitRevivedEvent'),
}
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 18
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 40
# The typeid of NNet.Replay.SInitData (the type used to store the inital lobby).
replay_initdata_typeid = 69
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for k,v in value.iteritems():
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
    """Decode a stream of events, yielding one annotated dict per event.

    Each event on the wire is prefixed with a gameloop delta and, for
    game/message streams, a userid.  Annotations added to each yielded
    event dict: _event (type name), _eventid, _gameloop (absolute loop,
    accumulated from the deltas), _userid (only if decode_user_id), and
    _bits (bits consumed from the stream for this event).

    decoder -- BitPackedDecoder or VersionedDecoder positioned at the
        start of the stream.
    eventid_typeid -- typeinfos index of the event-id enum for this stream.
    event_types -- map of eventid -> (typeid, typename).
    decode_user_id -- True for game/message streams, False for tracker.

    Raises CorruptedError (from the decoders module) on an unknown
    eventid.
    """
    gameloop = 0
    while not decoder.done():
        start_bits = decoder.used_bits()
        # decode the gameloop delta before each event
        delta = _varuint32_value(decoder.instance(svaruint32_typeid))
        gameloop += delta
        # decode the userid before each event
        if decode_user_id:
            userid = decoder.instance(replay_userid_typeid)
        # decode the event id
        eventid = decoder.instance(eventid_typeid)
        typeid, typename = event_types.get(eventid, (None, None))
        if typeid is None:
            raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
        # decode the event struct instance
        event = decoder.instance(typeid)
        event['_event'] = typename
        event['_eventid'] = eventid
        # insert gameloop and userid
        event['_gameloop'] = gameloop
        if decode_user_id:
            event['_userid'] = userid
        # the next event is byte aligned
        decoder.byte_align()
        # insert bits used in stream
        event['_bits'] = decoder.used_bits() - start_bits
        yield event
def decode_replay_game_events(contents):
    """Decode and yield each game event from the contents byte string."""
    # Game events are bit-packed and carry a userid prefix.
    stream = _decode_event_stream(BitPackedDecoder(contents, typeinfos),
                                  game_eventid_typeid,
                                  game_event_types,
                                  decode_user_id=True)
    for event in stream:
        yield event
def decode_replay_message_events(contents):
    """Decode and yield each message event from the contents byte string."""
    # Message events use the same bit-packed framing as game events.
    stream = _decode_event_stream(BitPackedDecoder(contents, typeinfos),
                                  message_eventid_typeid,
                                  message_event_types,
                                  decode_user_id=True)
    for event in stream:
        yield event
def decode_replay_tracker_events(contents):
    """Decode and yield each tracker event from the contents byte string."""
    # Tracker events use the versioned (tagged) encoding and have no userid.
    stream = _decode_event_stream(VersionedDecoder(contents, typeinfos),
                                  tracker_eventid_typeid,
                                  tracker_event_types,
                                  decode_user_id=False)
    for event in stream:
        yield event
def decode_replay_header(contents):
    """Decode and return the replay header from the contents byte string."""
    return VersionedDecoder(contents, typeinfos).instance(replay_header_typeid)
def decode_replay_details(contents):
    """Decode and return the game details from the contents byte string."""
    return VersionedDecoder(contents, typeinfos).instance(game_details_typeid)
def decode_replay_initdata(contents):
    """Decode and return the replay init data from the contents byte string."""
    return BitPackedDecoder(contents, typeinfos).instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
    """Decode and return the attributes dict from the contents byte string.

    Returns a dict with 'source', 'mapNamespace' and 'scopes' keys;
    'scopes' maps scope -> attrid -> list of attribute value dicts.
    (Despite the name, this returns a dict rather than yielding.)
    """
    # Attributes use little-endian bit packing, unlike the event streams.
    buffer = BitPackedBuffer(contents, 'little')
    attributes = {}
    if not buffer.done():
        attributes['source'] = buffer.read_bits(8)
        attributes['mapNamespace'] = buffer.read_bits(32)
        # declared attribute count; the loop below reads until exhaustion
        # instead of trusting it
        count = buffer.read_bits(32)
        attributes['scopes'] = {}
        while not buffer.done():
            value = {}
            value['namespace'] = buffer.read_bits(32)
            value['attrid'] = attrid = buffer.read_bits(32)
            scope = buffer.read_bits(8)
            # 4 bytes, stored reversed on the wire; strip NUL padding.
            # NOTE(review): on Python 3 read_aligned_bytes would return
            # bytes, making .strip('\x00') raise — this code assumes
            # Python 2 str semantics; confirm before porting.
            value['value'] = buffer.read_aligned_bytes(4)[::-1].strip('\x00')
            if not scope in attributes['scopes']:
                attributes['scopes'][scope] = {}
            if not attrid in attributes['scopes'][scope]:
                attributes['scopes'][scope][attrid] = []
            attributes['scopes'][scope][attrid].append(value)
    return attributes
def unit_tag(unitTagIndex, unitTagRecycle):
    """Pack an index/recycle pair into a single unit tag.

    The recycle counter occupies the low 18 bits; the index sits above.
    """
    return unitTagRecycle + (unitTagIndex << 18)
def unit_tag_index(unitTag):
    """Recover the 14-bit index field (bits 18..31) from a packed unit tag."""
    # floor-division by 2**18 is the arithmetic right shift
    return (unitTag // (1 << 18)) & 0x3fff
def unit_tag_recycle(unitTag):
    """Recover the 18-bit recycle counter (low bits) from a packed unit tag."""
    return unitTag % (1 << 18)
| mit |
cjellick/rancher | tests/integration/suite/test_cluster_role_template_bindings.py | 4 | 2526 | import pytest
from .common import random_str
from .conftest import wait_for
from rancher import ApiError
def test_cannot_target_users_and_group(admin_mc, remove_resource):
    """Asserts that a clusterroletemplatebinding cannot target both
    user and group subjects"""
    client = admin_mc.client
    with pytest.raises(ApiError) as err:
        # creation must be rejected, so the binding should never exist;
        # remove_resource is registered defensively anyway
        binding = client.create_cluster_role_template_binding(
            name="crtb-" + random_str(),
            clusterId="local",
            userId=admin_mc.user.id,
            groupPrincipalId="someauthprovidergroupid",
            roleTemplateId="clustercatalogs-view")
        remove_resource(binding)
    assert err.value.error.status == 422
    assert "must target a user [userId]/[userPrincipalId] OR a group " \
           "[groupId]/[groupPrincipalId]" in err.value.error.message
def test_must_have_target(admin_mc, remove_resource):
    """Asserts that a clusterroletemplatebinding must have a subject"""
    client = admin_mc.client
    with pytest.raises(ApiError) as err:
        # no userId/groupPrincipalId at all -> server must reject
        binding = client.create_cluster_role_template_binding(
            name="crtb-" + random_str(),
            clusterId="local",
            roleTemplateId="clustercatalogs-view")
        remove_resource(binding)
    assert err.value.error.status == 422
    assert "must target a user [userId]/[userPrincipalId] OR a group " \
           "[groupId]/[groupPrincipalId]" in err.value.error.message
def test_cannot_update_subjects_or_cluster(admin_mc, remove_resource):
    """Asserts non-metadata fields cannot be updated"""
    client = admin_mc.client
    before = client.create_cluster_role_template_binding(
        name="crtb-" + random_str(),
        clusterId="local",
        userId=admin_mc.user.id,
        roleTemplateId="clustercatalogs-view")
    remove_resource(before)
    # wait until the controller has filled in the principal id
    wait_for(lambda: client.reload(before).userPrincipalId is not None)
    before = client.reload(before)
    after = client.update_by_id_cluster_role_template_binding(
        id=before.id,
        clusterId="fakecluster",
        userId="",
        userPrincipalId="asdf",
        groupPrincipalId="asdf",
        group="asdf"
    )
    # every subject/cluster field must be unchanged by the update attempt
    for field in ("clusterId", "userId", "userPrincipalId",
                  "groupPrincipalId", "group"):
        assert after.get(field) == before.get(field)
| apache-2.0 |
svn2github/django | tests/regressiontests/bulk_create/tests.py | 13 | 2641 | from __future__ import absolute_import
from operator import attrgetter
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models import Country, Restaurant, Pizzeria, State
class BulkCreateTests(TestCase):
    """Tests for QuerySet.bulk_create() across backends and model shapes."""
    def setUp(self):
        # Unsaved Country instances shared by the tests below.
        self.data = [
            Country(name="United States of America", iso_two_letter="US"),
            Country(name="The Netherlands", iso_two_letter="NL"),
            Country(name="Germany", iso_two_letter="DE"),
            Country(name="Czech Republic", iso_two_letter="CZ")
        ]
    def test_simple(self):
        # bulk_create returns the list of created objects, and an empty
        # input list is a no-op that returns [].
        created = Country.objects.bulk_create(self.data)
        self.assertEqual(len(created), 4)
        self.assertQuerysetEqual(Country.objects.order_by("-name"), [
            "United States of America", "The Netherlands", "Germany", "Czech Republic"
        ], attrgetter("name"))
        created = Country.objects.bulk_create([])
        self.assertEqual(created, [])
        self.assertEqual(Country.objects.count(), 4)
    @skipUnlessDBFeature("has_bulk_insert")
    def test_efficiency(self):
        # On backends with native bulk insert, all rows go in one query.
        with self.assertNumQueries(1):
            Country.objects.bulk_create(self.data)
    def test_inheritance(self):
        # Single-table child (Restaurant, a proxy-free subclass) works;
        # multi-table child (Pizzeria) must raise without side effects.
        Restaurant.objects.bulk_create([
            Restaurant(name="Nicholas's")
        ])
        self.assertQuerysetEqual(Restaurant.objects.all(), [
            "Nicholas's",
        ], attrgetter("name"))
        with self.assertRaises(ValueError):
            Pizzeria.objects.bulk_create([
                Pizzeria(name="The Art of Pizza")
            ])
        self.assertQuerysetEqual(Pizzeria.objects.all(), [])
        self.assertQuerysetEqual(Restaurant.objects.all(), [
            "Nicholas's",
        ], attrgetter("name"))
    def test_non_auto_increment_pk(self):
        # Models with an explicit (non-AutoField) primary key still insert
        # in a single query.
        with self.assertNumQueries(1):
            State.objects.bulk_create([
                State(two_letter_code=s)
                for s in ["IL", "NY", "CA", "ME"]
            ])
        self.assertQuerysetEqual(State.objects.order_by("two_letter_code"), [
            "CA", "IL", "ME", "NY",
        ], attrgetter("two_letter_code"))
    @skipIfDBFeature('allows_primary_key_0')
    def test_zero_as_autoval(self):
        """
        Zero as id for AutoField should raise exception in MySQL, because MySQL
        does not allow zero for automatic primary key.
        """
        valid_country = Country(name='Germany', iso_two_letter='DE')
        invalid_country = Country(id=0, name='Poland', iso_two_letter='PL')
        with self.assertRaises(ValueError):
            Country.objects.bulk_create([valid_country, invalid_country])
| bsd-3-clause |
2014c2g2/teamwork | wsgi/static/reeborg/src/libraries/brython/Lib/operator.py | 51 | 7713 | #!/usr/bin/env python3
"""
Operator Interface
This module exports a set of functions corresponding to the intrinsic
operators of Python. For example, operator.add(x, y) is equivalent
to the expression x+y. The function names are those used for special
methods; variants without leading and trailing '__' are also provided
for convenience.
This is the pure Python implementation of the module.
"""
# downloaded from http://bugs.python.org/file28327/operator.py
#import builtins as _bi #there is no builtins module
# Rich comparison wrappers.  Each is also exported under its
# double-underscore alias (e.g. __lt__) to mirror the special-method names.
def lt(a, b):
    "Same as a < b."
    return a < b
__lt__ = lt
def le(a, b):
    "Same as a <= b."
    return a <= b
__le__ = le
def eq(a, b):
    "Same as a == b."
    return a == b
__eq__ = eq
def ne(a, b):
    "Same as a != b."
    return a != b
__ne__ = ne
def ge(a, b):
    "Same as a >= b."
    return a >= b
__ge__ = ge
def gt(a, b):
    "Same as a > b."
    return a > b
__gt__ = gt
def not_(a):
    "Same as not a."
    return not a
__not__ = not_
def truth(a):
    "Return True if a is true, False otherwise."
    #return _bi.bool(a)
    return bool(a)
def is_(a, b):
    "Same as a is b."
    return a is b
# NOTE(review): is_not is missing from this port — the brython of the time
# rejected the "is not" syntax (see original workaround comment below).
#def is_not(a, b):
#    "Same as a is not b."
#    return a is not b
# abs could not be wrapped in a function here (caused a recursion error),
# so the module simply re-exports the builtin under both names.
#def abs(a):
#    "Same as abs(a)."
#    #return _bi.abs(a)
#    return abs(a)
__abs__ = abs
abs=abs
# Binary arithmetic / bitwise wrappers, each aliased under its dunder name.
def add(a, b):
    "Same as a + b."
    return a + b
__add__ = add
def and_(a, b):
    "Same as a & b."
    return a & b
__and__ = and_
def floordiv(a, b):
    "Same as a // b."
    return a // b
__floordiv__ = floordiv
def index(a):
    "Same as a.__index__()."
    return a.__index__()
__index__ = index
def inv(a):
    """Same as ~a (bitwise inversion; for integers ~a == -a - 1).

    Bug fix: the previous workaround ``a ^ (2**31)`` only flipped bit 31
    and returned the wrong value for every input (e.g. inv(0) gave
    2147483648 instead of -1).
    """
    return ~a
invert = __inv__ = __invert__ = inv
# Shift, modulo, multiplication, negation and bitwise-or wrappers.
def lshift(a, b):
    "Same as a << b."
    return a << b
__lshift__ = lshift
def mod(a, b):
    "Same as a % b."
    return a % b
__mod__ = mod
def mul(a, b):
    "Same as a * b."
    return a * b
__mul__ = mul
def neg(a):
    "Same as -a."
    return -a
__neg__ = neg
def or_(a, b):
    "Same as a | b."
    return a | b
__or__ = or_
def pos(a):
    """Same as +a (unary plus; delegates to __pos__ for custom types).

    Bug fix: the previous fallback returned abs(a), which is wrong for
    negative numbers — +(-3) is -3, not 3.
    """
    return +a
__pos__ = pos
# Power, right-shift, subtraction, true division and xor wrappers.
# NOTE: `pow` intentionally shadows the builtin inside this module,
# matching the stdlib operator.pow API.
def pow(a, b):
    "Same as a ** b."
    return a ** b
__pow__ = pow
def rshift(a, b):
    "Same as a >> b."
    return a >> b
__rshift__ = rshift
def sub(a, b):
    "Same as a - b."
    return a - b
__sub__ = sub
def truediv(a, b):
    "Same as a / b."
    return a / b
__truediv__ = truediv
def xor(a, b):
    "Same as a ^ b."
    return a ^ b
__xor__ = xor
def concat(a, b):
    "Same as a + b, for a and b sequences."
    # Both operands must be indexable; reject numbers and other
    # non-sequences with the same TypeError as the stdlib.
    if hasattr(a, '__getitem__') and hasattr(b, '__getitem__'):
        return a + b
    raise TypeError('a and b must be sequences')
__concat__ = concat
def contains(a, b):
    "Same as b in a (note reversed operands)."
    # Operand order follows the __contains__ protocol: container first.
    return b in a
__contains__ = contains
def countOf(a, b):
    "Return the number of times b occurs in a."
    # Equality (==), not identity, decides a match.
    return sum(1 for item in a if item == b)
# Subscription wrappers: del a[b], a[b], a[b] = c.
def delitem(a, b):
    "Same as del a[b]."
    del a[b]
__delitem__ = delitem
def getitem(a, b):
    "Same as a[b]."
    return a[b]
__getitem__ = getitem
# NOTE(review): indexOf is missing from this port — disabled because the
# brython of the time could not run it (see original fixme below).
#def indexOf(a, b):
#    "Return the first index of b in a."
#    for i, j in _bi.enumerate(a):
#        if j == b:
#            return i
#    else:
#        raise ValueError('b not found in a')
def setitem(a, b, c):
    "Same as a[b] = c."
    a[b] = c
__setitem__ = setitem
class attrgetter:
    """
    Return a callable object that fetches the given attribute(s) from its operand.
    After f=attrgetter('name'), the call f(r) returns r.name.
    After g=attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).
    After h=attrgetter('name.first', 'name.last'), the call h(r) returns
    (r.name.first, r.name.last).
    """
    def __init__(self, attr, *attrs):
        # Collect every requested attribute path, then validate them.
        names = (attr,) + attrs
        self._attrs = names
        if any(not isinstance(name, str) for name in names):
            raise TypeError('attribute name must be a string')
    @staticmethod
    def _resolve_attr(obj, attr):
        # Walk dotted paths ('a.b.c') one attribute at a time.
        for part in attr.split('.'):
            obj = getattr(obj, part)
        return obj
    def __call__(self, obj):
        resolved = tuple(self._resolve_attr(obj, name) for name in self._attrs)
        if len(resolved) == 1:
            return resolved[0]
        return resolved
class itemgetter:
    """
    Return a callable object that fetches the given item(s) from its operand.

    After f=itemgetter(2), the call f(r) returns r[2].
    After g=itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]).
    """

    def __init__(self, item, *items):
        self._items = (item,) + items

    def __call__(self, obj):
        selected = tuple(obj[key] for key in self._items)
        # A single requested item is returned bare, not as a 1-tuple.
        if len(selected) == 1:
            return selected[0]
        return selected
class methodcaller:
    """
    Return a callable object that calls the given method on its operand.

    After f = methodcaller('name'), the call f(r) returns r.name().
    After g = methodcaller('name', 'date', foo=1), the call g(r) returns
    r.name('date', foo=1).
    """

    def __init__(self, name, *args, **kwargs):
        self._name = name
        self._args = args
        self._kwargs = kwargs

    def __call__(self, obj):
        bound_method = getattr(obj, self._name)
        return bound_method(*self._args, **self._kwargs)
def iadd(a, b):
    """Same as a += b.

    NOTE: the augmented form matters — for mutable operands (e.g. lists)
    ``a += b`` mutates *a* in place via ``__iadd__``; rewriting it as
    ``a = a + b`` would silently change the semantics.
    """
    a += b
    return a
__iadd__ = iadd
def iand(a, b):
    """Same as a &= b (in-place via ``__iand__`` when *a* supports it)."""
    a &= b
    return a
__iand__ = iand
def iconcat(a, b):
    """Same as a += b, for a and b sequences."""
    # Both operands must look like sequences; a bare += would also accept
    # numbers, which operator.iconcat is specified to reject.
    if not (hasattr(a, '__getitem__') and hasattr(b, '__getitem__')):
        raise TypeError('a and b must be sequences')
    a += b
    return a
__iconcat__ = iconcat
def ifloordiv(a, b):
    """Same as a //= b (in-place via ``__ifloordiv__`` when supported)."""
    a //= b
    return a
__ifloordiv__ = ifloordiv
def ilshift(a, b):
    """Same as a <<= b (in-place via ``__ilshift__`` when supported)."""
    a <<= b
    return a
__ilshift__ = ilshift
def imod(a, b):
    """Same as a %= b (in-place via ``__imod__`` when supported)."""
    a %= b
    return a
__imod__ = imod
def imul(a, b):
    """Same as a *= b (mutates list-like *a* in place via ``__imul__``)."""
    a *= b
    return a
__imul__ = imul
def ior(a, b):
    """Same as a |= b (in-place via ``__ior__``, e.g. for sets)."""
    a |= b
    return a
__ior__ = ior
def ipow(a, b):
    """Same as a **= b (in-place via ``__ipow__`` when supported)."""
    a **= b
    return a
__ipow__ = ipow
def irshift(a, b):
    """Same as a >>= b (in-place via ``__irshift__`` when supported)."""
    a >>= b
    return a
__irshift__ = irshift
def isub(a, b):
    """Same as a -= b (in-place via ``__isub__``, e.g. for sets)."""
    a -= b
    return a
__isub__ = isub
def itruediv(a, b):
    """Same as a /= b (in-place via ``__itruediv__`` when supported)."""
    a /= b
    return a
__itruediv__ = itruediv
def ixor(a, b):
    """Same as a ^= b (in-place via ``__ixor__``, e.g. for sets)."""
    a ^= b
    return a
__ixor__ = ixor
def length_hint(obj, default=0):
    """
    Return an estimate of the number of items in obj.

    This is useful for presizing containers when building from an iterable.
    If the object supports len(), the result will be exact. Otherwise, it
    falls back to ``obj.__length_hint__()`` (PEP 424), and finally to
    *default*. The result will be an integer >= 0.

    Bug fix: the previous check ``if not val > 0`` rejected a perfectly
    valid hint of 0 (an empty iterable) and raised with the misleading
    message "default must be > 0". Per PEP 424 only *negative* hints are
    invalid.
    """
    try:
        return len(obj)
    except TypeError:
        try:
            val = obj.__length_hint__()
            if val is NotImplemented:
                # Treat NotImplemented like a missing hint.
                raise TypeError
        except (AttributeError, TypeError):
            return default
        else:
            if val < 0:
                raise ValueError('__length_hint__() should return >= 0')
            return val
#try:
# from _operator import *
# from _operator import __doc__
#except ImportError:
# pass
| gpl-2.0 |
mathcamp/steward_palantir | steward_palantir/client.py | 1 | 6597 | """ Client commands """
from datetime import datetime
from pprint import pprint
from steward.colors import green, red, yellow, magenta
def _fuzzy_timedelta(td):
    """ Format a timedelta into a *loose* 'X time ago' string """
    # Render "3 days ago" / "1 hour ago"; pluralizes only when count > 1.
    ago_str = lambda x, y: '%d %s%s ago' % (x, y, 's' if x > 1 else '')
    if td.days > 0:
        return ago_str(td.days, 'day')
    # NOTE(review): relies on Python 2 integer division — under Python 3
    # these would be floats and sub-hour deltas could print "0 hours ago".
    hours = td.seconds / 3600
    if hours > 0:
        return ago_str(hours, 'hour')
    minutes = td.seconds / 60
    if minutes > 0:
        return ago_str(minutes, 'minute')
    return ago_str(td.seconds, 'second')
def _format_check_status(status):
    """ Turn a check status into a nicely-formatted string

    ``status`` is a dict with at least 'check' and 'retcode'; optional
    keys 'enabled', 'last_run'/'created' (epoch timestamps), 'stdout'
    and 'stderr' add extra lines when present.
    """
    string = status['check'] + ': '
    # retcode convention: 0 = success, 1 = warning, anything else = error.
    if status['retcode'] == 0:
        string += "SUCCESS"
        color = green
    elif status['retcode'] == 1:
        string += "WARNING"
        color = yellow
    else:
        string += "ERROR(%d)" % status['retcode']
        color = red
    string = color(string)
    if not status.get('enabled', True):
        string += ' (disabled)'
    # Prefer the last run time; fall back to the creation time for checks
    # that have never run.
    if 'last_run' in status:
        ran_at = datetime.fromtimestamp(status['last_run'])
        string += '\nRan at %s (%s)' % (ran_at.isoformat(),
                                        _fuzzy_timedelta(datetime.now() -
                                                         ran_at))
    else:
        ran_at = datetime.fromtimestamp(status['created'])
        string += '\nCreated at %s (%s)' % (ran_at.isoformat(),
                                            _fuzzy_timedelta(datetime.now() -
                                                             ran_at))
    if status.get('stdout'):
        string += "\nSTDOUT:\n%s" % status['stdout']
    if status.get('stderr'):
        string += "\nSTDERR:\n%s" % status['stderr']
    return string
def do_alerts(client):
    """ Print all active alerts """
    response = client.cmd('palantir/alert/list').json()
    # Sort by minion, then by check name — two stable sorts, applied in
    # reverse key order, yield the (minion, check) ordering.
    response.sort(key=lambda x: x['check'])
    response.sort(key=lambda x: x['minion'])
    for alert in response:
        # _format_check_status expects a 'name' key.
        alert['name'] = alert['check']
        color = yellow if alert['retcode'] == 1 else red
        print "{} - {}".format(color(alert['minion']),
                               _format_check_status(alert))
def do_checks(client, check=None):
    """
    List the Palantir checks or print details of one in particular

    Parameters
    ----------
    check : str, optional
        If specified, print out the details of this check

    """
    response = client.cmd('palantir/check/list').json()
    if check is None:
        # NOTE: the loop rebinds the ``check`` parameter — harmless here
        # since this branch only runs when it was None.
        for name, check in response.iteritems():
            line = name
            if not check['enabled']:
                line += ' (disabled)'
            print line
    else:
        # Raises KeyError if the named check does not exist.
        pprint(response[check])
def do_minions(client):
    """ Print the list of minions, one per line, '(disabled)' annotated """
    response = client.cmd('palantir/minion/list').json()
    for name in sorted(response):
        minion = response[name]
        if minion['enabled']:
            print minion['name']
        else:
            print minion['name'] + ' (disabled)'
def do_status(client, minion, check=None):
    """
    Print the result of the last check on a minion

    Parameters
    ----------
    minion : str
        Name of the minion
    check : str, optional
        Name of the check. If not provided, print all checks.

    """
    if check is None:
        # Full minion view: header plus every check's status.
        response = client.cmd('palantir/minion/get', minion=minion).json()
        header = response['name']
        if not response['enabled']:
            header += ' (disabled)'
        print magenta('-' * len(header))
        print magenta(header)
        for check in response['checks']:
            print _format_check_status(check)
    else:
        # Single-check view; the server returns null for unknown checks.
        response = client.cmd('palantir/minion/check/get', minion=minion,
                              check=check).json()
        if response is None:
            print "Check %s not found on %s" % (check, minion)
            return
        response['name'] = check
        print _format_check_status(response)
def do_run_check(client, check):
    """
    Run a Palantir check

    Parameters
    ----------
    check : str
        Name of the check to run

    """
    response = client.cmd('palantir/check/run', name=check).json()
    # The server returns a plain string on error (e.g. unknown check);
    # otherwise a {minion: result} mapping.
    if isinstance(response, basestring):
        print response
    else:
        for minion, result in response.iteritems():
            result['name'] = check
            print '{}: {}'.format(green(minion), _format_check_status(result))
def do_resolve(client, minion, check):
    """
    Mark an alert as resolved

    Parameters
    ----------
    minion : str
        Name of the minion
    check : str
        Name of the check

    """
    alert = {'minion': minion, 'check': check}
    # Fire-and-forget: the server response is intentionally ignored.
    client.cmd('palantir/alert/resolve', alerts=[alert])
def do_minion_enable(client, *minions):
    """
    Enable one or more minions

    Parameters
    ----------
    *minions : list
        The minions to enable

    """
    # Fire-and-forget toggle; counterpart of do_minion_disable.
    client.cmd('palantir/minion/toggle', minions=minions, enabled=True)
def do_minion_disable(client, *minions):
    """
    Disable one or more minions

    Parameters
    ----------
    *minions : list
        The minions to disable

    """
    # Fire-and-forget toggle; counterpart of do_minion_enable.
    client.cmd('palantir/minion/toggle', minions=minions, enabled=False)
def do_check_enable(client, *checks):
    """
    Enable one or more checks

    Parameters
    ----------
    *checks : list
        The checks to enable

    """
    # Fire-and-forget toggle; counterpart of do_check_disable.
    client.cmd('palantir/check/toggle', checks=checks, enabled=True)
def do_check_disable(client, *checks):
    """
    Disable one or more checks

    Parameters
    ----------
    *checks : list
        The checks to disable

    """
    # Fire-and-forget toggle; counterpart of do_check_enable.
    client.cmd('palantir/check/toggle', checks=checks, enabled=False)
def do_minion_check_enable(client, minion, *checks):
    """
    Enable one or more checks on a specific minion

    Parameters
    ----------
    minion : str
        The minion to enable checks on
    *checks : list
        The checks to enable on the minion

    """
    # Per-minion variant of do_check_enable.
    client.cmd('palantir/minion/check/toggle', minion=minion, checks=checks,
               enabled=True)
def do_minion_check_disable(client, minion, *checks):
    """
    Disable one or more checks on a specific minion

    Parameters
    ----------
    minion : str
        The minions to disable checks on
    *checks : list
        The checks to disable on the minion

    """
    # Per-minion variant of do_check_disable.
    client.cmd('palantir/minion/check/toggle', minion=minion, checks=checks,
               enabled=False)
| mit |
spoki0/AI3 | AI3/opencv/sources/samples/python2/dft.py | 7 | 2627 | #!/usr/bin/env python
import cv2
import numpy as np
import sys
def shift_dft(src, dst=None):
    '''
    Rearrange the quadrants of Fourier image so that the origin is at
    the image center. Swaps quadrant 1 with 3, and 2 with 4.

    src and dst arrays must be equal size & type.  When dst is None a
    new array is allocated; dst may alias src (in-place shift).
    '''
    if dst is None:
        dst = np.empty(src.shape, src.dtype)
    elif src.shape != dst.shape:
        raise ValueError("src and dst must have equal sizes")
    elif src.dtype != dst.dtype:
        raise TypeError("src and dst must have equal types")

    # For the in-place case, write into a scratch array first so the
    # quadrant copies do not read data that was already overwritten.
    if src is dst:
        ret = np.empty(src.shape, src.dtype)
    else:
        ret = dst

    h, w = src.shape[:2]

    # NOTE(review): Python 2 integer division — under Python 3 these
    # would be floats and the slicing below would fail; use // if porting.
    cx1 = cx2 = w/2
    cy1 = cy2 = h/2

    # if the size is odd, then adjust the bottom/right quadrants
    if w % 2 != 0:
        cx2 += 1
    if h % 2 != 0:
        cy2 += 1

    # swap quadrants

    # swap q1 and q3
    ret[h-cy1:, w-cx1:] = src[0:cy1 , 0:cx1 ]   # q1 -> q3
    ret[0:cy2 , 0:cx2 ] = src[h-cy2:, w-cx2:]   # q3 -> q1

    # swap q2 and q4
    ret[0:cy2 , w-cx2:] = src[h-cy2:, 0:cx2 ]   # q2 -> q4
    ret[h-cy1:, 0:cx1 ] = src[0:cy1 , w-cx1:]   # q4 -> q2

    if src is dst:
        dst[:,:] = ret

    return dst
if __name__ == "__main__":

    # Load the image given on the command line, or fall back to the
    # bundled sample (and remind the user of the expected usage).
    if len(sys.argv)>1:
        im = cv2.imread(sys.argv[1])
    else :
        im = cv2.imread('../c/baboon.jpg')
        print "usage : python dft.py <image_file>"

    # convert to grayscale
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    h, w = im.shape[:2]

    realInput = im.astype(np.float64)

    # perform an optimally sized dft
    dft_M = cv2.getOptimalDFTSize(w)
    dft_N = cv2.getOptimalDFTSize(h)

    # copy A to dft_A and pad dft_A with zeros
    dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64)
    dft_A[:h, :w, 0] = realInput

    # no need to pad bottom part of dft_A with zeros because of
    # use of nonzeroRows parameter in cv2.dft()
    cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)

    cv2.imshow("win", im)

    # Split fourier into real and imaginary parts
    image_Re, image_Im = cv2.split(dft_A)

    # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    magnitude = cv2.sqrt(image_Re**2.0 + image_Im**2.0)

    # Compute log(1 + Mag)
    log_spectrum = cv2.log(1.0 + magnitude)

    # Rearrange the quadrants of Fourier image so that the origin is at
    # the image center
    shift_dft(log_spectrum, log_spectrum)

    # normalize and display the results as rgb
    cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.cv.CV_MINMAX)
    cv2.imshow("magnitude", log_spectrum)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
| mit |
zhoffice/minos | owl/utils/quota_injector.py | 5 | 2553 | import logging
import os
import sys
import time
from urlparse import urlparse
root_path = os.path.abspath(
os.path.dirname(os.path.realpath(__file__))+ '/../..')
opentsdb_path = os.path.join(root_path, 'opentsdb')
sys.path.append(opentsdb_path)
tsdb_register = __import__('tsdb_register')
from tsdb_register import conf_path
from tsdb_register import TsdbRegister
owl_config_path = os.path.join(conf_path, 'owl')
sys.path.append(owl_config_path)
owl_config = __import__('owl_config')
tsdb_host, tsdb_port = urlparse(owl_config.TSDB_ADDR).netloc.split(':')
logger_quota = logging.getLogger('quota')
# the quota items need to calculate the total value
QUOTA_TOTAL_DICT = {
'used_quota': (int, 0),
'used_space_quota': (int, 0),
}
class QuotaInjector():
  '''
  Push quota information into opentsdb
  '''
  def __init__(self):
    self.tsdb_register = TsdbRegister()

  def check_quota_new_keys(self, quota_list):
    # Register any metric names not yet known to opentsdb.  Only the
    # first dict's keys are inspected — assumes all entries in
    # quota_list share the same key set (TODO confirm with callers).
    if len(quota_list) > 0:
      for quota_key in quota_list[0].keys():
        if quota_key not in self.tsdb_register.register_keys and quota_key != 'name':
          self.tsdb_register.new_keys.append(quota_key)
          self.tsdb_register.register_keys.add(quota_key)
      self.tsdb_register.register_new_keys_to_tsdb()

  def push_quota_to_tsdb(self, quota_list, cluster_name):
    # Push every user's quota values, then per-key totals, to opentsdb
    # via `echo put ... | nc`.
    #
    # SECURITY NOTE(review): the put command is built by string
    # interpolation and executed through os.system (a shell).  Metric
    # names, user names and cluster names flow into that string
    # unescaped — a hostile value could inject shell commands.  Prefer
    # subprocess with an argument list, or a direct socket write.
    self.check_quota_new_keys(quota_list)
    timestamp = int(time.time())
    # reset the quota_total_dict
    quota_total_dict = dict.fromkeys(QUOTA_TOTAL_DICT, 0)
    # push every user's quota to tsdb for cluster_name
    for quota_dict in quota_list:
      for quota_key, quota_value in quota_dict.iteritems():
        if quota_key != 'name':
          # Non-numeric values (e.g. 'none') are coerced to 0.
          if not quota_value.isdigit():
            quota_value = '0'
          quota_record = "%s %d %d user_id=%s cluster=%s" % (
            quota_key, timestamp, int(quota_value), quota_dict['name'], cluster_name)
          put_operation = 'echo put %s | nc -w 10 %s %s' % (quota_record, tsdb_host, tsdb_port)
          logger_quota.info(put_operation)
          os.system(put_operation)
          if quota_key in quota_total_dict.keys():
            quota_total_dict[quota_key] += int(quota_value)
    # push the total values to tsdb
    for quota_key, quota_value in quota_total_dict.iteritems():
      quota_record = "%s %d %d user_id=%s cluster=%s" % (
        quota_key, timestamp, quota_value, quota_key+'_total', cluster_name)
      put_operation = 'echo put %s | nc -w 10 %s %s' % (quota_record, tsdb_host, tsdb_port)
      logger_quota.info(put_operation)
      os.system(put_operation)
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/eventhub/azure-eventhub/tests/livetest/asynctests/test_consumer_client_async.py | 1 | 7698 | import pytest
import asyncio
from azure.eventhub import EventData
from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub.aio._eventprocessor.in_memory_checkpoint_store import InMemoryCheckpointStore
from azure.eventhub._constants import ALL_PARTITIONS
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_receive_no_partition_async(connstr_senders):
    # Receive from all partitions; verify both sent events arrive and that
    # update_checkpoint persisted matching offset/sequence-number entries.
    connection_str, senders = connstr_senders
    senders[0].send(EventData("Test EventData"))
    senders[1].send(EventData("Test EventData"))
    client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')

    async def on_event(partition_context, event):
        # Stash everything the callback observed on function attributes so
        # the outer test body can assert on it afterwards.
        on_event.received += 1
        await partition_context.update_checkpoint(event)
        on_event.namespace = partition_context.fully_qualified_namespace
        on_event.eventhub_name = partition_context.eventhub_name
        on_event.consumer_group = partition_context.consumer_group
        on_event.offset = event.offset
        on_event.sequence_number = event.sequence_number

    on_event.received = 0
    on_event.namespace = None
    on_event.eventhub_name = None
    on_event.consumer_group = None
    on_event.offset = None
    on_event.sequence_number = None

    async with client:
        task = asyncio.ensure_future(
            client.receive(on_event, starting_position="-1"))
        # Give the live service time to deliver both events.
        await asyncio.sleep(10)
        assert on_event.received == 2

        checkpoints = await list(client._event_processors.values())[0]._checkpoint_store.list_checkpoints(
            on_event.namespace, on_event.eventhub_name, on_event.consumer_group
        )
        assert len([checkpoint for checkpoint in checkpoints if checkpoint["offset"] == on_event.offset]) > 0
        assert len(
            [checkpoint for checkpoint in checkpoints if checkpoint["sequence_number"] == on_event.sequence_number]) > 0

    await task
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_receive_partition_async(connstr_senders):
    # Receive from a single named partition and verify the partition
    # context exposes the expected identity fields.
    connection_str, senders = connstr_senders
    senders[0].send(EventData("Test EventData"))
    client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')

    async def on_event(partition_context, event):
        assert partition_context.partition_id == "0"
        assert partition_context.consumer_group == "$default"
        assert partition_context.fully_qualified_namespace in connection_str
        assert partition_context.eventhub_name == senders[0]._client.eventhub_name
        on_event.received += 1
    on_event.received = 0

    async with client:
        task = asyncio.ensure_future(
            client.receive(on_event, partition_id="0", starting_position="-1"))
        await asyncio.sleep(10)
        assert on_event.received == 1

    await task
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_receive_load_balancing_async(connstr_senders):
    # Two clients sharing one checkpoint store should split the (two)
    # partitions between themselves — one claimed partition task each.
    connection_str, senders = connstr_senders
    cs = InMemoryCheckpointStore()
    client1 = EventHubConsumerClient.from_connection_string(
        connection_str, consumer_group='$default', checkpoint_store=cs, load_balancing_interval=1)
    client2 = EventHubConsumerClient.from_connection_string(
        connection_str, consumer_group='$default', checkpoint_store=cs, load_balancing_interval=1)

    async def on_event(partition_context, event):
        pass

    async with client1, client2:
        task1 = asyncio.ensure_future(
            client1.receive(on_event, starting_position="-1"))
        # Let client1 claim partitions before client2 joins.
        await asyncio.sleep(3.3)
        task2 = asyncio.ensure_future(
            client2.receive(on_event, starting_position="-1"))
        await asyncio.sleep(20)
        assert len(client1._event_processors[("$default", ALL_PARTITIONS)]._tasks) == 1
        assert len(client2._event_processors[("$default", ALL_PARTITIONS)]._tasks) == 1

    await task1
    await task2
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_receive_batch_no_max_wait_time_async(connstr_senders):
    '''Test whether callback is called when max_wait_time is None and max_batch_size has reached
    '''
    connection_str, senders = connstr_senders
    senders[0].send(EventData("Test EventData"))
    senders[1].send(EventData("Test EventData"))
    client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')

    async def on_event_batch(partition_context, event_batch):
        # Stash observed values on function attributes for later asserts.
        on_event_batch.received += len(event_batch)
        await partition_context.update_checkpoint()
        on_event_batch.namespace = partition_context.fully_qualified_namespace
        on_event_batch.eventhub_name = partition_context.eventhub_name
        on_event_batch.consumer_group = partition_context.consumer_group
        on_event_batch.offset = event_batch[-1].offset
        on_event_batch.sequence_number = event_batch[-1].sequence_number

    on_event_batch.received = 0
    on_event_batch.namespace = None
    on_event_batch.eventhub_name = None
    on_event_batch.consumer_group = None
    on_event_batch.offset = None
    on_event_batch.sequence_number = None

    async with client:
        task = asyncio.ensure_future(
            client.receive_batch(on_event_batch, starting_position="-1"))
        await asyncio.sleep(10)
        assert on_event_batch.received == 2

        checkpoints = await list(client._event_processors.values())[0]._checkpoint_store.list_checkpoints(
            on_event_batch.namespace, on_event_batch.eventhub_name, on_event_batch.consumer_group
        )
        assert len([checkpoint for checkpoint in checkpoints if checkpoint["offset"] == on_event_batch.offset]) > 0
        assert len(
            [checkpoint for checkpoint in checkpoints if checkpoint["sequence_number"] == on_event_batch.sequence_number]) > 0

    await task
# Parametrization: with max_wait_time=3, waiting 10s means the wait expires
# and the handler fires with an empty list; waiting only 2s means the
# handler has not fired yet, so the sentinel None is still in place.
@pytest.mark.parametrize("max_wait_time, sleep_time, expected_result",
                         [(3, 10, []),
                          (3, 2, None),
                          ])
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_receive_batch_empty_with_max_wait_time_async(connection_str, max_wait_time, sleep_time, expected_result):
    '''Test whether event handler is called when max_wait_time > 0 and no event is received
    '''
    client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')

    async def on_event_batch(partition_context, event_batch):
        on_event_batch.event_batch = event_batch
    on_event_batch.event_batch = None

    async with client:
        task = asyncio.ensure_future(
            client.receive_batch(on_event_batch, max_wait_time=max_wait_time, starting_position="-1"))
        await asyncio.sleep(sleep_time)
        assert on_event_batch.event_batch == expected_result

    await task
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_receive_batch_early_callback_async(connstr_senders):
    ''' Test whether the callback is called once max_batch_size reaches and before max_wait_time reaches.
    '''
    connection_str, senders = connstr_senders
    for _ in range(10):
        senders[0].send(EventData("Test EventData"))
    client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')

    async def on_event_batch(partition_context, event_batch):
        on_event_batch.received += len(event_batch)
    on_event_batch.received = 0

    async with client:
        # max_wait_time (100s) far exceeds the 10s sleep below, so seeing
        # all 10 events proves the batch fired on max_batch_size, not the
        # wait timeout.
        task = asyncio.ensure_future(
            client.receive_batch(
                on_event_batch, max_batch_size=10, max_wait_time=100, starting_position="-1", partition_id="0"))
        await asyncio.sleep(10)
        assert on_event_batch.received == 10

    await task
| mit |
fedorpatlin/ansible | lib/ansible/modules/network/citrix/netscaler.py | 36 | 5338 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage Citrix NetScaler entities
(c) 2013, Nandor Sivok <nandor@gawker.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler
version_added: "1.1"
short_description: Manages Citrix NetScaler entities
description:
- Manages Citrix NetScaler server and service entities.
options:
nsc_host:
description:
- hostname or ip of your netscaler
required: true
default: null
aliases: []
nsc_protocol:
description:
- protocol used to access netscaler
required: false
default: https
aliases: []
user:
description:
- username
required: true
default: null
aliases: []
password:
description:
- password
required: true
default: null
aliases: []
action:
description:
- the action you want to perform on the entity
required: false
default: disable
choices: ["enable", "disable"]
aliases: []
name:
description:
- name of the entity
required: true
default: hostname
aliases: []
type:
description:
- type of the entity
required: false
default: server
choices: ["server", "service"]
aliases: []
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
author: "Nandor Sivok (@dominis)"
'''
EXAMPLES = '''
# Disable the server
- netscaler:
nsc_host: nsc.example.com
user: apiuser
password: apipass
# Enable the server
- netscaler:
nsc_host: nsc.example.com
user: apiuser
password: apipass
action: enable
# Disable the service local:8080
- netscaler:
nsc_host: nsc.example.com
user: apiuser
password: apipass
name: 'local:8080'
type: service
action: disable
'''
import base64
import socket
import urllib
class netscaler(object):
    """Thin wrapper around the NetScaler Nitro REST API."""

    _nitro_base_url = '/nitro/v1/'

    def __init__(self, module):
        self.module = module

    def http_request(self, api_endpoint, data_json=None):
        """Issue a Nitro request; ``data_json`` is an optional dict of params.

        Fix: the default used to be the mutable ``{}`` — a classic Python
        pitfall (one shared dict across every call).  ``None`` is the safe
        sentinel; both {} and None produce a body-less request, exactly as
        before (urlencode({}) yielded '' which was then mapped to None).
        """
        request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint

        data = urllib.urlencode(data_json) if data_json else None

        auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
        headers = {
            'Authorization': 'Basic %s' % auth,
            'Content-Type' : 'application/x-www-form-urlencoded',
        }

        response, info = fetch_url(self.module, request_url, data=data, headers=headers)

        return json.load(response)

    def prepare_request(self, action):
        """Build and send the Nitro 'config' payload for enable/disable."""
        resp = self.http_request(
            'config',
            {
                "object":
                {
                    "params": {"action": action},
                    self._type: {"name": self._name}
                }
            }
        )

        return resp
def core(module):
    # Build the API wrapper from module params, fire the enable/disable
    # request and return (nitro errorcode, full response dict).
    n = netscaler(module)
    n._nsc_host = module.params.get('nsc_host')
    n._nsc_user = module.params.get('user')
    n._nsc_pass = module.params.get('password')
    n._nsc_protocol = module.params.get('nsc_protocol')
    n._name = module.params.get('name')
    n._type = module.params.get('type')
    action = module.params.get('action')

    r = n.prepare_request(action)

    # errorcode 0 means success in the Nitro API.
    return r['errorcode'], r
def main():
    # Ansible entry point: declare accepted arguments, run the request,
    # and translate the result into exit_json/fail_json.
    module = AnsibleModule(
        argument_spec = dict(
            nsc_host = dict(required=True),
            nsc_protocol = dict(default='https'),
            user = dict(required=True),
            password = dict(required=True, no_log=True),
            action = dict(default='enable', choices=['enable','disable']),
            name = dict(default=socket.gethostname()),
            type = dict(default='server', choices=['service', 'server']),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    rc = 0
    try:
        rc, result = core(module)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))

    if rc != 0:
        module.fail_json(rc=rc, msg=result)
    else:
        # NOTE(review): changed=True is reported unconditionally, even if
        # the entity was already in the requested state.
        result['changed'] = True
        module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
| gpl-3.0 |
persandstrom/home-assistant | homeassistant/components/binary_sensor/command_line.py | 7 | 3447 | """
Support for custom shell commands to retrieve values.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.command_line/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.binary_sensor import (
BinarySensorDevice, DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA)
from homeassistant.components.sensor.command_line import CommandSensorData
from homeassistant.const import (
CONF_PAYLOAD_OFF, CONF_PAYLOAD_ON, CONF_NAME, CONF_VALUE_TEMPLATE,
CONF_COMMAND, CONF_DEVICE_CLASS)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Binary Command Sensor'
DEFAULT_PAYLOAD_ON = 'ON'
DEFAULT_PAYLOAD_OFF = 'OFF'
SCAN_INTERVAL = timedelta(seconds=60)
CONF_COMMAND_TIMEOUT = 'command_timeout'
DEFAULT_TIMEOUT = 15
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COMMAND): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(
CONF_COMMAND_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Command line Binary Sensor."""
    name = config.get(CONF_NAME)
    command = config.get(CONF_COMMAND)
    payload_off = config.get(CONF_PAYLOAD_OFF)
    payload_on = config.get(CONF_PAYLOAD_ON)
    device_class = config.get(CONF_DEVICE_CLASS)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    command_timeout = config.get(CONF_COMMAND_TIMEOUT)
    if value_template is not None:
        # Templates need a hass reference before they can be rendered.
        value_template.hass = hass
    data = CommandSensorData(hass, command, command_timeout)

    # True -> fetch initial state by calling update() before adding.
    add_entities([CommandBinarySensor(
        hass, data, name, device_class, payload_on, payload_off,
        value_template)], True)
class CommandBinarySensor(BinarySensorDevice):
    """Binary sensor whose state is derived from a shell command's output."""

    def __init__(self, hass, data, name, device_class, payload_on,
                 payload_off, value_template):
        """Initialize the Command line binary sensor."""
        self._hass = hass
        self.data = data
        self._name = name
        self._device_class = device_class
        self._state = False
        self._payload_on = payload_on
        self._payload_off = payload_off
        self._value_template = value_template

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return self._state

    @property
    def device_class(self):
        """Return the class of the binary sensor."""
        return self._device_class

    def update(self):
        """Run the command and derive the new state from its output."""
        self.data.update()
        raw = self.data.value
        if self._value_template is not None:
            raw = self._value_template.render_with_possible_json_value(
                raw, False)
        # Output matching neither payload leaves the state unchanged.
        if raw == self._payload_on:
            self._state = True
        elif raw == self._payload_off:
            self._state = False
| apache-2.0 |
Distrotech/scons | build/scons/build/lib/SCons/Tool/f77.py | 2 | 2067 | """engine.SCons.Tool.f77
Tool-specific initialization for the generic Posix f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f77.py 2014/01/04 01:12:18 root"
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_f77_to_env
compilers = ['f77']
def generate(env):
    """Add Builders and construction variables for f77 to *env*."""
    add_all_to_env(env)
    add_f77_to_env(env)

    # Prefer a real f77 found on the PATH; fall back to the plain name so
    # the generated command lines are still plausible.
    fcomp = env.Detect(compilers) or 'f77'
    env['F77'] = fcomp
    env['SHF77'] = fcomp
    env['FORTRAN'] = fcomp
    env['SHFORTRAN'] = fcomp
def exists(env):
    """Return a truthy value when an f77 compiler is found on the PATH."""
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
bikong2/django | tests/queryset_pickle/tests.py | 209 | 6081 | from __future__ import unicode_literals
import datetime
import pickle
import unittest
import warnings
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.version import get_version
from .models import Container, Event, Group, Happening, M2MModel
class PickleabilityTestCase(TestCase):
    """Regression tests ensuring QuerySets, models and managers survive a
    pickle.dumps()/pickle.loads() round trip (various historical tickets)."""
    def setUp(self):
        Happening.objects.create() # make sure the defaults are working (#20158)
    def assert_pickles(self, qs):
        # Helper: a queryset must evaluate to the same rows after a
        # pickle round trip as it does directly.
        self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
    def test_related_field(self):
        g = Group.objects.create(name="Ponies Who Own Maybachs")
        self.assert_pickles(Event.objects.filter(group=g.id))
    def test_datetime_callable_default_all(self):
        self.assert_pickles(Happening.objects.all())
    def test_datetime_callable_default_filter(self):
        self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
    def test_string_as_default(self):
        self.assert_pickles(Happening.objects.filter(name="test"))
    def test_standalone_method_as_default(self):
        self.assert_pickles(Happening.objects.filter(number1=1))
    @unittest.skipIf(six.PY2, "Field doesn't exist on Python 2.")
    def test_staticmethod_as_default(self):
        self.assert_pickles(Happening.objects.filter(number2=1))
    def test_filter_reverse_fk(self):
        self.assert_pickles(Group.objects.filter(event=1))
    def test_doesnotexist_exception(self):
        # Ticket #17776
        original = Event.DoesNotExist("Doesn't exist")
        unpickled = pickle.loads(pickle.dumps(original))
        # Exceptions are not equal to equivalent instances of themselves, so
        # can't just use assertEqual(original, unpickled)
        self.assertEqual(original.__class__, unpickled.__class__)
        self.assertEqual(original.args, unpickled.args)
    def test_manager_pickle(self):
        pickle.loads(pickle.dumps(Happening.objects))
    def test_model_pickle(self):
        """
        Test that a model not defined on module level is pickleable.
        """
        original = Container.SomeModel(pk=1)
        dumped = pickle.dumps(original)
        reloaded = pickle.loads(dumped)
        self.assertEqual(original, reloaded)
        # Also, deferred dynamic model works
        Container.SomeModel.objects.create(somefield=1)
        original = Container.SomeModel.objects.defer('somefield')[0]
        dumped = pickle.dumps(original)
        reloaded = pickle.loads(dumped)
        self.assertEqual(original, reloaded)
        self.assertEqual(original.somefield, reloaded.somefield)
    def test_model_pickle_m2m(self):
        """
        Test intentionally the automatically created through model.
        """
        m1 = M2MModel.objects.create()
        g1 = Group.objects.create(name='foof')
        m1.groups.add(g1)
        m2m_through = M2MModel._meta.get_field('groups').remote_field.through
        original = m2m_through.objects.get()
        dumped = pickle.dumps(original)
        reloaded = pickle.loads(dumped)
        self.assertEqual(original, reloaded)
    def test_model_pickle_dynamic(self):
        # A proxy model created at runtime with type() must still pickle,
        # and must unpickle to the dynamic class, not its base.
        class Meta:
            proxy = True
        dynclass = type(str("DynamicEventSubclass"), (Event, ),
                        {'Meta': Meta, '__module__': Event.__module__})
        original = dynclass(pk=1)
        dumped = pickle.dumps(original)
        reloaded = pickle.loads(dumped)
        self.assertEqual(original, reloaded)
        self.assertIs(reloaded.__class__, dynclass)
    def test_specialized_queryset(self):
        self.assert_pickles(Happening.objects.values('name'))
        self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
        # With related field (#14515)
        self.assert_pickles(
            Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
        )
    def test_pickle_prefetch_related_idempotence(self):
        g = Group.objects.create(name='foo')
        groups = Group.objects.prefetch_related('event_set')
        # First pickling
        groups = pickle.loads(pickle.dumps(groups))
        self.assertQuerysetEqual(groups, [g], lambda x: x)
        # Second pickling
        groups = pickle.loads(pickle.dumps(groups))
        self.assertQuerysetEqual(groups, [g], lambda x: x)
    def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self):
        """
        #24831 -- Cached properties on ManyToOneRel created in QuerySet.delete()
        caused subsequent QuerySet pickling to fail.
        """
        g = Group.objects.create(name='foo')
        m2m = M2MModel.objects.create()
        m2m.groups.add(g)
        Group.objects.all().delete()
        m2ms = M2MModel.objects.prefetch_related('groups')
        m2ms = pickle.loads(pickle.dumps(m2ms))
        self.assertQuerysetEqual(m2ms, [m2m], lambda x: x)
    def test_missing_django_version_unpickling(self):
        """
        #21430 -- Verifies a warning is raised for querysets that are
        unpickled without a Django version
        """
        qs = Group.missing_django_version_objects.all()
        with warnings.catch_warnings(record=True) as recorded:
            pickle.loads(pickle.dumps(qs))
            msg = force_text(recorded.pop().message)
            self.assertEqual(msg,
                "Pickled queryset instance's Django version is not specified.")
    def test_unsupported_unpickle(self):
        """
        #21430 -- Verifies a warning is raised for querysets that are
        unpickled with a different Django version than the current
        """
        qs = Group.previous_django_version_objects.all()
        with warnings.catch_warnings(record=True) as recorded:
            pickle.loads(pickle.dumps(qs))
            msg = force_text(recorded.pop().message)
            self.assertEqual(
                msg,
                "Pickled queryset instance's Django version 1.0 does not "
                "match the current version %s." % get_version()
            )
| bsd-3-clause |
RobinD42/pyside | tests/QtCore/qobject_inherits_test.py | 6 | 1896 | '''Test cases for QObject methods'''
import unittest
from PySide.QtCore import QObject
class InheritsCase(unittest.TestCase):
'''Test case for QObject.inherits'''
def testCppInheritance(self):
#QObject.inherits() for c++ classes
#An class inherits itself
self.assert_(QObject().inherits('QObject'))
def testPythonInheritance(self):
#QObject.inherits() for python classes
class Dummy(QObject):
#Dummy class
pass
self.assert_(Dummy().inherits('QObject'))
self.assert_(Dummy().inherits('Dummy'))
self.assert_(not Dummy().inherits('FooBar'))
def testPythonMultiInheritance(self):
#QObject.inherits() for multiple inheritance
# QObject.inherits(classname) should fail if classname isn't a
# QObject subclass
class Parent(object):
#Dummy parent
pass
class Dummy(QObject, Parent):
#Dummy class
pass
self.assert_(Dummy().inherits('QObject'))
self.assert_(not Dummy().inherits('Parent'))
def testSetAttributeBeforeCallingInitOnQObjectDerived(self):
'''Test for bug #428.'''
class DerivedObject(QObject):
def __init__(self):
self.member = 'member'
QObject.__init__(self)
obj0 = DerivedObject()
# The second instantiation of DerivedObject will generate an exception
# that will not come to surface immediately.
obj1 = DerivedObject()
# The mere calling of the object method causes
# the exception to "reach the surface".
obj1.objectName()
def testMultipleInheritance(self):
def declareClass():
class Foo(object, QObject):
pass
self.assertRaises(TypeError, declareClass)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| lgpl-2.1 |
koyuawsmbrtn/eclock | windows/Python27/Lib/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py | 550 | 8977 | # urllib3/poolmanager.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.
    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    :param \**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.
    Example: ::
        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2
    """
    # Overridden by ProxyManager; checked in urlopen() to decide whether to
    # send the absolute URL (proxied plain HTTP) or just the request-uri.
    proxy = None
    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU container: pools evicted from the cache are closed via
        # dispose_func so their sockets are released.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())
    def _new_pool(self, scheme, host, port):
        """
        Create a new :class:`ConnectionPool` based on host, port and scheme.
        This method is used to actually create the connection pools handed out
        by :meth:`connection_from_url` and companion methods. It is intended
        to be overridden for customization.
        """
        pool_cls = pool_classes_by_scheme[scheme]
        kwargs = self.connection_pool_kw
        if scheme == 'http':
            # HTTPConnectionPool does not accept the SSL-only kwargs, so
            # strip them from a copy (the original kwargs stay intact for
            # future https pools).
            kwargs = self.connection_pool_kw.copy()
            for kw in SSL_KEYWORDS:
                kwargs.pop(kw, None)
        return pool_cls(host, port, **kwargs)
    def clear(self):
        """
        Empty our store of pools and direct them all to close.
        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()
    def connection_from_host(self, host, port=None, scheme='http'):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.
        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``.
        """
        scheme = scheme or 'http'
        port = port or port_by_scheme.get(scheme, 80)
        # Pools are keyed on the full (scheme, host, port) triple.
        pool_key = (scheme, host, port)
        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool
            # Make a fresh ConnectionPool of the desired type
            pool = self._new_pool(scheme, host, port)
            self.pools[pool_key] = pool
        return pool
    def connection_from_url(self, url):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url` but
        doesn't pass any additional parameters to the
        :class:`urllib3.connectionpool.ConnectionPool` constructor.
        Additional parameters are taken from the :class:`.PoolManager`
        constructor.
        """
        u = parse_url(url)
        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.
        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
        # Redirects are handled here (possibly across hosts), so the
        # underlying pool must not second-guess the host or follow them.
        kw['assert_same_host'] = False
        kw['redirect'] = False
        if 'headers' not in kw:
            kw['headers'] = self.headers
        if self.proxy is not None and u.scheme == "http":
            # Proxied plain-HTTP requests must carry the absolute URL.
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)
        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response
        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)
        # RFC 2616, Section 10.3.4
        if response.status == 303:
            method = 'GET'
        log.info("Redirecting %s -> %s" % (url, redirect_location))
        kw['retries'] = kw.get('retries', 3) - 1  # Persist retries countdown
        kw['redirect'] = redirect
        return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.
    :param proxy_url:
        The URL of the proxy to be used.
    :param proxy_headers:
        A dictionary contaning headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.
    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3
    """
    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):
        # Accept an existing pool in place of a URL string and rebuild the
        # URL from its attributes.
        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            # Default the proxy port from the scheme when not explicit.
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)
        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}
        assert self.proxy.scheme in ("http", "https"), \
            'Not supported proxy scheme %s' % self.proxy.scheme
        # Pass the proxy settings down so new pools connect through it.
        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers
        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)
    def connection_from_host(self, host, port=None, scheme='http'):
        # HTTPS is tunnelled with CONNECT, so a pool per target host is
        # needed; plain HTTP is sent through a single pool keyed on the
        # proxy itself.
        if scheme == "https":
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme)
        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme)
    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}
        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc
        if headers:
            # User-supplied headers take precedence over the defaults above.
            headers_.update(headers)
        return headers_
    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)
        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
                                                                self.headers))
        return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
    """Convenience constructor: build a :class:`ProxyManager` for *url*."""
    manager = ProxyManager(proxy_url=url, **kw)
    return manager
| gpl-2.0 |
mrquim/repository.mrquim | repo/script.module.unidecode/lib/unidecode/x082.py | 252 | 4649 | data = (
# ASCII transliteration table for the Unicode block U+8200..U+82FF (CJK
# ideographs). The tuple index is the low byte of the code point, as
# indicated by the trailing "# 0xNN" marker on each entry; '[?]' marks
# code points with no known transliteration.
'Yao ', # 0x00
'Yu ', # 0x01
'Chong ', # 0x02
'Xi ', # 0x03
'Xi ', # 0x04
'Jiu ', # 0x05
'Yu ', # 0x06
'Yu ', # 0x07
'Xing ', # 0x08
'Ju ', # 0x09
'Jiu ', # 0x0a
'Xin ', # 0x0b
'She ', # 0x0c
'She ', # 0x0d
'Yadoru ', # 0x0e
'Jiu ', # 0x0f
'Shi ', # 0x10
'Tan ', # 0x11
'Shu ', # 0x12
'Shi ', # 0x13
'Tian ', # 0x14
'Dan ', # 0x15
'Pu ', # 0x16
'Pu ', # 0x17
'Guan ', # 0x18
'Hua ', # 0x19
'Tan ', # 0x1a
'Chuan ', # 0x1b
'Shun ', # 0x1c
'Xia ', # 0x1d
'Wu ', # 0x1e
'Zhou ', # 0x1f
'Dao ', # 0x20
'Gang ', # 0x21
'Shan ', # 0x22
'Yi ', # 0x23
'[?] ', # 0x24
'Pa ', # 0x25
'Tai ', # 0x26
'Fan ', # 0x27
'Ban ', # 0x28
'Chuan ', # 0x29
'Hang ', # 0x2a
'Fang ', # 0x2b
'Ban ', # 0x2c
'Que ', # 0x2d
'Hesaki ', # 0x2e
'Zhong ', # 0x2f
'Jian ', # 0x30
'Cang ', # 0x31
'Ling ', # 0x32
'Zhu ', # 0x33
'Ze ', # 0x34
'Duo ', # 0x35
'Bo ', # 0x36
'Xian ', # 0x37
'Ge ', # 0x38
'Chuan ', # 0x39
'Jia ', # 0x3a
'Lu ', # 0x3b
'Hong ', # 0x3c
'Pang ', # 0x3d
'Xi ', # 0x3e
'[?] ', # 0x3f
'Fu ', # 0x40
'Zao ', # 0x41
'Feng ', # 0x42
'Li ', # 0x43
'Shao ', # 0x44
'Yu ', # 0x45
'Lang ', # 0x46
'Ting ', # 0x47
'[?] ', # 0x48
'Wei ', # 0x49
'Bo ', # 0x4a
'Meng ', # 0x4b
'Nian ', # 0x4c
'Ju ', # 0x4d
'Huang ', # 0x4e
'Shou ', # 0x4f
'Zong ', # 0x50
'Bian ', # 0x51
'Mao ', # 0x52
'Die ', # 0x53
'[?] ', # 0x54
'Bang ', # 0x55
'Cha ', # 0x56
'Yi ', # 0x57
'Sao ', # 0x58
'Cang ', # 0x59
'Cao ', # 0x5a
'Lou ', # 0x5b
'Dai ', # 0x5c
'Sori ', # 0x5d
'Yao ', # 0x5e
'Tong ', # 0x5f
'Yofune ', # 0x60
'Dang ', # 0x61
'Tan ', # 0x62
'Lu ', # 0x63
'Yi ', # 0x64
'Jie ', # 0x65
'Jian ', # 0x66
'Huo ', # 0x67
'Meng ', # 0x68
'Qi ', # 0x69
'Lu ', # 0x6a
'Lu ', # 0x6b
'Chan ', # 0x6c
'Shuang ', # 0x6d
'Gen ', # 0x6e
'Liang ', # 0x6f
'Jian ', # 0x70
'Jian ', # 0x71
'Se ', # 0x72
'Yan ', # 0x73
'Fu ', # 0x74
'Ping ', # 0x75
'Yan ', # 0x76
'Yan ', # 0x77
'Cao ', # 0x78
'Cao ', # 0x79
'Yi ', # 0x7a
'Le ', # 0x7b
'Ting ', # 0x7c
'Qiu ', # 0x7d
'Ai ', # 0x7e
'Nai ', # 0x7f
'Tiao ', # 0x80
'Jiao ', # 0x81
'Jie ', # 0x82
'Peng ', # 0x83
'Wan ', # 0x84
'Yi ', # 0x85
'Chai ', # 0x86
'Mian ', # 0x87
'Mie ', # 0x88
'Gan ', # 0x89
'Qian ', # 0x8a
'Yu ', # 0x8b
'Yu ', # 0x8c
'Shuo ', # 0x8d
'Qiong ', # 0x8e
'Tu ', # 0x8f
'Xia ', # 0x90
'Qi ', # 0x91
'Mang ', # 0x92
'Zi ', # 0x93
'Hui ', # 0x94
'Sui ', # 0x95
'Zhi ', # 0x96
'Xiang ', # 0x97
'Bi ', # 0x98
'Fu ', # 0x99
'Tun ', # 0x9a
'Wei ', # 0x9b
'Wu ', # 0x9c
'Zhi ', # 0x9d
'Qi ', # 0x9e
'Shan ', # 0x9f
'Wen ', # 0xa0
'Qian ', # 0xa1
'Ren ', # 0xa2
'Fou ', # 0xa3
'Kou ', # 0xa4
'Jie ', # 0xa5
'Lu ', # 0xa6
'Xu ', # 0xa7
'Ji ', # 0xa8
'Qin ', # 0xa9
'Qi ', # 0xaa
'Yuan ', # 0xab
'Fen ', # 0xac
'Ba ', # 0xad
'Rui ', # 0xae
'Xin ', # 0xaf
'Ji ', # 0xb0
'Hua ', # 0xb1
'Hua ', # 0xb2
'Fang ', # 0xb3
'Wu ', # 0xb4
'Jue ', # 0xb5
'Gou ', # 0xb6
'Zhi ', # 0xb7
'Yun ', # 0xb8
'Qin ', # 0xb9
'Ao ', # 0xba
'Chu ', # 0xbb
'Mao ', # 0xbc
'Ya ', # 0xbd
'Fei ', # 0xbe
'Reng ', # 0xbf
'Hang ', # 0xc0
'Cong ', # 0xc1
'Yin ', # 0xc2
'You ', # 0xc3
'Bian ', # 0xc4
'Yi ', # 0xc5
'Susa ', # 0xc6
'Wei ', # 0xc7
'Li ', # 0xc8
'Pi ', # 0xc9
'E ', # 0xca
'Xian ', # 0xcb
'Chang ', # 0xcc
'Cang ', # 0xcd
'Meng ', # 0xce
'Su ', # 0xcf
'Yi ', # 0xd0
'Yuan ', # 0xd1
'Ran ', # 0xd2
'Ling ', # 0xd3
'Tai ', # 0xd4
'Tiao ', # 0xd5
'Di ', # 0xd6
'Miao ', # 0xd7
'Qiong ', # 0xd8
'Li ', # 0xd9
'Yong ', # 0xda
'Ke ', # 0xdb
'Mu ', # 0xdc
'Pei ', # 0xdd
'Bao ', # 0xde
'Gou ', # 0xdf
'Min ', # 0xe0
'Yi ', # 0xe1
'Yi ', # 0xe2
'Ju ', # 0xe3
'Pi ', # 0xe4
'Ruo ', # 0xe5
'Ku ', # 0xe6
'Zhu ', # 0xe7
'Ni ', # 0xe8
'Bo ', # 0xe9
'Bing ', # 0xea
'Shan ', # 0xeb
'Qiu ', # 0xec
'Yao ', # 0xed
'Xian ', # 0xee
'Ben ', # 0xef
'Hong ', # 0xf0
'Ying ', # 0xf1
'Zha ', # 0xf2
'Dong ', # 0xf3
'Ju ', # 0xf4
'Die ', # 0xf5
'Nie ', # 0xf6
'Gan ', # 0xf7
'Hu ', # 0xf8
'Ping ', # 0xf9
'Mei ', # 0xfa
'Fu ', # 0xfb
'Sheng ', # 0xfc
'Gu ', # 0xfd
'Bi ', # 0xfe
'Wei ', # 0xff
)
| gpl-2.0 |
johnbachman/pysb | pysb/bng.py | 5 | 35923 | from __future__ import absolute_import
from __future__ import print_function as _
import pysb.core
from pysb.generator.bng import BngGenerator, format_complexpattern
import os
import subprocess
import re
import itertools
import sympy
import sympy.parsing.sympy_parser as sympy_parser
import numpy
import tempfile
import abc
from warnings import warn
import shutil
import collections
from collections.abc import Sequence
import pysb.pathfinder as pf
import tokenize
from pysb.logging import get_logger, EXTENDED_DEBUG
from sympy.logic.boolalg import Boolean
from io import StringIO
def set_bng_path(dir):
    """Deprecated alias; use :func:`pysb.pathfinder.set_path` instead."""
    message = ("Function %s() is deprecated; use pysb.pathfinder.set_path() "
               "instead" % set_bng_path.__name__)
    warn(message, category=DeprecationWarning, stacklevel=2)
    pf.set_path('bng', dir)
class BngInterfaceError(RuntimeError):
    """Raised when BNG reports an error."""
class BngBaseInterface(object, metaclass=abc.ABCMeta):
    """ Abstract base class for interfacing with BNG """
    # NOTE: the original code set ``__metaclass__ = abc.ABCMeta``, which is
    # the Python 2 idiom and is silently ignored on Python 3 (this module
    # requires Python 3 -- it imports collections.abc). Using the
    # ``metaclass=`` keyword restores enforcement of the abstract methods.
    @abc.abstractmethod
    def __init__(self, model=None, verbose=False, cleanup=False,
                 output_prefix=None, output_dir=None,
                 model_additional_species=None,
                 model_population_maps=None):
        # Subclasses must call this to set up the logger, the generated
        # BNGL representation of the model, and a per-instance temporary
        # working directory.
        self._logger = get_logger(__name__,
                                  model=model,
                                  log_level=verbose)
        self._base_file_stem = 'pysb'
        self.cleanup = cleanup
        self.output_prefix = 'tmpBNG' if output_prefix is None else \
            output_prefix
        if model:
            self.generator = BngGenerator(
                model, additional_initials=model_additional_species,
                population_maps=model_population_maps
            )
            self.model = self.generator.model
            self._check_model()
        else:
            self.generator = None
            self.model = None
        # All BNG input/output files for this instance live in this
        # randomly-named temporary directory.
        self.base_directory = tempfile.mkdtemp(prefix=self.output_prefix,
                                               dir=output_dir)
        self._logger.debug('{} instantiated in directory {}'.format(
            self.__class__, self.base_directory)
        )
    def __enter__(self):
        return self
    @abc.abstractmethod
    def __exit__(self):
        return
    def _delete_tmpdir(self):
        # Remove the temporary working directory and everything in it.
        shutil.rmtree(self.base_directory)
    def _check_model(self):
        """
        Checks a model has at least one initial condition and rule, raising
        an exception if not
        """
        if not self.model.rules:
            raise NoRulesError()
        if (
            not self.model.initials
            and not any(r.is_synth() for r in self.model.rules)
        ):
            raise NoInitialConditionsError()
    @classmethod
    def _bng_param(cls, param):
        """
        Ensures a BNG console parameter is in the correct format
        Strings are double quoted and booleans are mapped to [0,1]. Other
        types are currently used verbatim.
        Parameters
        ----------
        param :
            An argument to a BNG action call
        """
        if isinstance(param, str):
            return '"%s"' % param
        elif isinstance(param, bool):
            return 1 if param else 0
        elif isinstance(param, (Sequence, numpy.ndarray)):
            return list(param)
        return param
    @abc.abstractmethod
    def action(self, action, **kwargs):
        """
        Generates code to execute a BNG action command
        Parameters
        ----------
        action: string
            The name of the BNG action function
        kwargs: kwargs, optional
            Arguments and values to supply to BNG
        """
        return
    @classmethod
    def _format_action_args(cls, **kwargs):
        """
        Formats a set of arguments for BNG
        Parameters
        ----------
        kwargs: kwargs, optional
            Arguments and values to supply to BNG
        """
        if kwargs:
            # Use cls rather than naming the BngConsole subclass directly;
            # _bng_param is defined on this base class.
            action_args = ','.join('%s=>%s' % (k, cls._bng_param(v))
                                   for k, v in kwargs.items())
        else:
            action_args = ''
        return action_args
    @property
    def base_filename(self):
        """
        Returns the base filename (without extension) for BNG output files
        """
        return os.path.join(self.base_directory, self._base_file_stem)
    @property
    def bng_filename(self):
        """
        Returns the BNG command list (.bngl) filename (does not check
        whether the file exists)
        """
        return self.base_filename + '.bngl'
    @property
    def net_filename(self):
        """
        Returns the BNG network filename (does not check whether the file
        exists)
        """
        return self.base_filename + '.net'
    def read_netfile(self):
        """
        Reads a BNG network file as a string. Note that you must execute
        network generation separately before attempting this, or the file will
        not be found.
        :return: Contents of the BNG network file as a string
        """
        self._logger.debug('Reading BNG netfile: %s' % self.net_filename)
        with open(self.net_filename, 'r') as net_file:
            output = net_file.read()
        return output
    def read_simulation_results(self):
        """
        Read the results of a BNG simulation as a numpy array
        Returns
        -------
        numpy.ndarray
            Simulation results in a 2D matrix (time on Y axis,
            species/observables/expressions on X axis depending on
            simulation type)
        """
        return self.read_simulation_results_multi([self.base_filename])[0]
    @staticmethod
    def read_simulation_results_multi(base_filenames):
        """
        Read the results of multiple BNG simulations
        Parameters
        ----------
        base_filenames: list of str
            A list of filename stems to read simulation results in from,
            including the full path but not including any file extension.
        Returns
        -------
        list of numpy.ndarray
            List of simulation results, each in a 2D matrix (time on Y axis,
            species/observables/expressions on X axis depending on
            simulation type)
        """
        list_of_yfulls = []
        for base_filename in base_filenames:
            names = ['time']
            # Read concentrations data
            try:
                cdat_arr = numpy.loadtxt(base_filename + '.cdat', skiprows=1, ndmin=2)
                # -1 for time column
                names += ['__s%d' % i for i in range(cdat_arr.shape[1] - 1)]
            except IOError:
                cdat_arr = None
            # Read groups data
            try:
                with open(base_filename + '.gdat', 'r') as f:
                    # Exclude \# and time column
                    names += f.readline().split()[2:]
                    # Exclude first column (time)
                    gdat_arr = numpy.loadtxt(f, ndmin=2)
                    if cdat_arr is None:
                        cdat_arr = numpy.ndarray((len(gdat_arr), 0))
                    else:
                        gdat_arr = gdat_arr[:, 1:]
            except IOError:
                if cdat_arr is None:
                    raise BngInterfaceError('Need at least one of .cdat file or '
                                            '.gdat file to read simulation '
                                            'results')
                gdat_arr = numpy.ndarray((len(cdat_arr), 0))
            # Build a structured array combining the species (cdat) and
            # observable/expression (gdat) columns under their names.
            yfull_dtype = list(zip(names, itertools.repeat(float)))
            yfull = numpy.ndarray(len(cdat_arr), yfull_dtype)
            yfull_view = yfull.view(float).reshape(len(yfull), -1)
            yfull_view[:, :cdat_arr.shape[1]] = cdat_arr
            yfull_view[:, cdat_arr.shape[1]:] = gdat_arr
            list_of_yfulls.append(yfull)
        return list_of_yfulls
class BngConsole(BngBaseInterface):
    """ Interact with BioNetGen through BNG Console """
    def __init__(self, model=None, verbose=False, cleanup=True,
                 output_dir=None, output_prefix=None, timeout=30,
                 suppress_warnings=False, model_additional_species=None):
        super(BngConsole, self).__init__(
            model, verbose, cleanup, output_prefix, output_dir,
            model_additional_species=model_additional_species
        )
        # pexpect drives the interactive BNG console; it is imported lazily
        # so the rest of the module works without it.
        try:
            import pexpect
        except ImportError:
            raise ImportError("Library 'pexpect' is required to use "
                              "BNGConsole, please install it to continue.\n"
                              "It is not currently available on Windows.")
        if suppress_warnings:
            warn("suppress_warnings is deprecated and has no effect. Adjust "
                 "the log level with the verbose argument instead.",
                 category=DeprecationWarning,
                 stacklevel=2)
        # Generate BNGL file
        if self.model:
            with open(self.bng_filename, mode='w') as bng_file:
                bng_file.write(self.generator.get_content())
        # Start BNG Console and load BNGL
        bng_path = pf.get_path('bng')
        bng_exec_path = '%s --console' % bng_path
        # A .bat entry point is directly executable; otherwise BNG is a
        # Perl script and needs the interpreter.
        if not bng_path.endswith('.bat'):
            bng_exec_path = 'perl %s' % bng_exec_path
        self.console = pexpect.spawn(bng_exec_path,
                                     cwd=self.base_directory,
                                     timeout=timeout)
        self._console_wait()
        if self.model:
            self.console.sendline('load %s' % self.bng_filename)
            self._console_wait()
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        In console mode, commands have already been executed, so we simply
        close down the console and erase the temporary directory if applicable.
        """
        self.console.sendline('done')
        self.console.close()
        if self.cleanup:
            self._delete_tmpdir()
    def _console_wait(self):
        """
        Wait for BNG console to process the command, and return the output
        :return: BNG console output from the previous command
        """
        # Block until the console prompt reappears, then classify the
        # accumulated output by scanning for BNG's ERROR/WARNING markers.
        self.console.expect('BNG>')
        # Python 3 requires explicit conversion of 'bytes' to 'str'
        console_msg = self.console.before.decode('utf-8')
        if "ERROR:" in console_msg:
            raise BngInterfaceError(console_msg)
        elif "WARNING:" in console_msg:
            self._logger.warning(console_msg)
        else:
            self._logger.debug(console_msg)
        return console_msg
    def generate_network(self, overwrite=False):
        """
        Generates a network in BNG and returns the network file contents as
        a string
        Parameters
        ----------
        overwrite: bool, optional
            Overwrite existing network file, if any
        """
        self.action('generate_network', overwrite=overwrite)
        return self.read_netfile()
    def action(self, action, **kwargs):
        """
        Generates a BNG action command and executes it through the console,
        returning any console output
        Parameters
        ----------
        action : string
            The name of the BNG action function
        kwargs : kwargs, optional
            Arguments and values to supply to BNG
        """
        # Process BNG arguments into a string
        action_args = self._format_action_args(**kwargs)
        # Execute the command via the console
        if action_args == '':
            cmd = 'action %s()' % action
        else:
            cmd = 'action %s({%s})' % (action, action_args)
        self._logger.debug(cmd)
        self.console.sendline(cmd)
        # Wait for the command to execute and return the result
        return self._console_wait()
    def load_bngl(self, bngl_file):
        """
        Load a BNGL file in the BNG console
        Parameters
        ----------
        bngl_file : string
            The filename of a .bngl file
        """
        cmd = 'load %s' % bngl_file
        self._logger.debug(cmd)
        self.console.sendline(cmd)
        self._console_wait()
        # Subsequent output files will be named after the loaded BNGL file.
        self._base_file_stem = os.path.splitext(os.path.basename(bngl_file))[0]
class BngFileInterface(BngBaseInterface):
    """Interact with BioNetGen by queueing action commands into a .bngl
    file and running BNG over it in a subprocess via execute()."""
    def __init__(self, model=None, verbose=False, output_dir=None,
                 output_prefix=None, cleanup=True,
                 model_additional_species=None,
                 model_population_maps=None):
        super(BngFileInterface, self).__init__(
            model, verbose, cleanup, output_prefix, output_dir,
            model_additional_species=model_additional_species,
            model_population_maps=model_population_maps
        )
        self._init_command_queue()
    def _init_command_queue(self):
        """
        Initializes the BNG command queue
        """
        # Commands accumulate in an in-memory buffer wrapped in BNG's
        # "begin actions"/"end actions" block; execute() closes the block.
        self.command_queue = StringIO()
        self.command_queue.write('begin actions\n')
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        In file interface mode, we close the command queue buffer (whether
        or not it's been executed) and erase the temporary directory if
        applicable.
        """
        self.command_queue.close()
        if self.cleanup:
            self._delete_tmpdir()
    def execute(self, reload_netfile=False, skip_file_actions=True):
        """
        Executes all BNG commands in the command queue.
        Parameters
        ----------
        reload_netfile: bool or str
            If true, attempts to reload an existing .net file from a
            previous execute() iteration. If a string, the filename
            specified in the string is supplied to BNG's readFile (which can be
            any file type BNG supports, such as .net or .bngl).
            This is useful for running multiple actions in a row,
            where results need to be read into PySB before a new series of
            actions is executed.
        skip_file_actions: bool
            Only used if the previous argument is not False. Set this
            argument to True to ignore any actions block in the loaded file.
        """
        self.command_queue.write('end actions\n')
        bng_commands = self.command_queue.getvalue()
        # Generate BNGL file
        with open(self.bng_filename, 'w') as bng_file:
            output = ''
            if self.model and not reload_netfile:
                output += self.generator.get_content()
            if reload_netfile:
                filename = reload_netfile if \
                    isinstance(reload_netfile, str) \
                    else self.net_filename
                output += '\n readFile({file=>"%s",skip_actions=>%d})\n' \
                          % (filename, int(skip_file_actions))
            output += bng_commands
            bng_file.write(output)
        # Log the generated file with zero-padded line numbers to make BNG
        # error locations easy to find in the debug output.
        lines = output.split('\n')
        line_number_format = 'L{{:0{}d}} {{}}'.format(int(numpy.ceil(numpy.log10(len(lines)))))
        output = '\n'.join(line_number_format.format(ln + 1, line) for ln, line in enumerate(lines))
        self._logger.debug('BNG command file contents:\n' + output)
        # Reset the command queue, in case execute() is called again
        self.command_queue.close()
        self._init_command_queue()
        bng_exec_args = [pf.get_path('bng'), self.bng_filename]
        # BNG is a Perl script unless packaged as a .bat entry point.
        if not bng_exec_args[0].endswith('.bat'):
            bng_exec_args.insert(0, 'perl')
        p = subprocess.Popen(bng_exec_args,
                             cwd=self.base_directory,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # output lines as DEBUG, unless a warning or error is encountered
        capture_error = False
        captured_error_lines = []
        for line in iter(p.stdout.readline, b''):
            line = line[:-1].decode('utf-8')
            if line.startswith('ERROR:'):
                capture_error = True
            if capture_error:
                captured_error_lines.append(line)
            self._logger.debug(line)
        # p_out is already consumed, so only get p_err
        (_, p_err) = p.communicate()
        p_err = p_err.decode('utf-8')
        if p.returncode or captured_error_lines:
            raise BngInterfaceError('\n'.join(captured_error_lines) + "\n" +
                                    p_err.rstrip())
    def action(self, action, **kwargs):
        """
        Generates a BNG action command and adds it to the command queue
        Parameters
        ----------
        action : string
            The name of the BNG action function
        kwargs : kwargs, optional
            Arguments and values to supply to BNG
        """
        # Process BNG arguments into a string
        action_args = self._format_action_args(**kwargs)
        # Add the command to the queue
        if action_args == '':
            self.command_queue.write('\t%s()\n' % action)
        else:
            self.command_queue.write('\t%s({%s})\n' % (action, action_args))
        return
    def set_parameter(self, name, value):
        """
        Generates a BNG action command and adds it to the command queue
        Parameters
        ----------
        name: string
            The name of the parameter to set
        value: float-like
            Value of parameter
        """
        self.command_queue.write('\tsetParameter("%s", %g)\n' % (name, value))
    def set_concentration(self, cplx_pat, value):
        """
        Generates a BNG action command and adds it to the command queue
        Parameters
        ----------
        cplx_pat: pysb.ComplexPattern or string
            Species ComplexPattern, or a BNG format string representation
        value: float-like
            Initial concentration
        """
        # Accept either a pre-formatted BNG pattern string or a PySB
        # pattern object that needs converting.
        if isinstance(cplx_pat, str):
            formatted_name = cplx_pat
        else:
            formatted_name = format_complexpattern(
                pysb.core.as_complex_pattern(cplx_pat)
            )
        self.command_queue.write('\tsetConcentration("%s", %g)\n' % (
            formatted_name, value))
def generate_hybrid_model(model, population_maps, additional_species=None,
                          safe=False, verbose=False, output_dir=None,
                          output_prefix=None, cleanup=True):
    """Run BNG's generate_hybrid_model action on *model* and return the
    resulting hybrid-population BNGL model ('_hpp' suffix) as a string."""
    with BngFileInterface(model,
                          output_dir=output_dir,
                          output_prefix=output_prefix,
                          cleanup=cleanup,
                          model_additional_species=additional_species,
                          model_population_maps=population_maps) as bng_iface:
        bng_iface.action('generate_hybrid_model', verbose=verbose, safe=safe,
                         suffix='hpp')
        bng_iface.execute()
        hpp_filename = bng_iface.base_filename + '_hpp.bngl'
        with open(hpp_filename, 'r') as hpp_file:
            contents = hpp_file.read()
    return contents
def run_ssa(model, t_end=10, n_steps=100, param_values=None, output_dir=None,
            output_file_basename=None, cleanup=True, verbose=False,
            **additional_args):
    """
    Simulate a model with BNG's SSA simulator and return the trajectories.
    Parameters
    ----------
    model : Model
        Model to simulate.
    t_end : number, optional
        Final time point of the simulation.
    n_steps : int, optional
        Number of steps in the simulation.
    param_values : vector-like or dictionary, optional
        Values to use for every parameter in the model. Ordering is
        determined by the order of model.parameters.
        If not specified, parameter values will be taken directly from
        model.parameters.
    output_dir : string, optional
        Location for temporary files generated by BNG. If None (the
        default), uses a temporary directory provided by the system. A
        temporary directory with a random name is created within the
        supplied location.
    output_file_basename : string, optional
        This argument is used as a prefix for the temporary BNG
        output directory, rather than the individual files.
    cleanup : bool, optional
        If True (default), delete the temporary files after the simulation is
        finished. If False, leave them in place. Useful for debugging.
    verbose : bool or int, optional (default: False)
        Sets the verbosity level of the logger. See the logging levels and
        constants from Python's logging module for interpretation of integer
        values. False is equal to the PySB default level (currently WARNING),
        True is equal to DEBUG.
    additional_args: kwargs, optional
        Additional arguments to pass to BioNetGen
    """
    # Forward BNG's own verbose flag only at (or below) extended-debug level;
    # lower numeric logging levels are more verbose.
    bng_action_debug = verbose if isinstance(verbose, bool) else \
        verbose <= EXTENDED_DEBUG
    additional_args['method'] = 'ssa'
    additional_args['t_end'] = t_end
    additional_args['n_steps'] = n_steps
    additional_args['verbose'] = additional_args.get('verbose',
                                                     bng_action_debug)
    if param_values is not None:
        if len(param_values) != len(model.parameters):
            raise Exception("param_values must be the same length as "
                            "model.parameters")
        # NOTE: this writes the values back into the model's Parameter
        # objects, permanently modifying the caller's model.
        for i in range(len(param_values)):
            model.parameters[i].value = param_values[i]
    with BngFileInterface(model, verbose=verbose, output_dir=output_dir,
                          output_prefix=output_file_basename,
                          cleanup=cleanup) as bngfile:
        bngfile.action('generate_network', overwrite=True,
                       verbose=bng_action_debug)
        bngfile.action('simulate', **additional_args)
        bngfile.execute()
        output = bngfile.read_netfile()
        # Parse netfile (in case this hasn't already been done)
        if not model.species:
            _parse_netfile(model, iter(output.split('\n')))
        yfull = bngfile.read_simulation_results()
    return yfull
def generate_network(model, cleanup=True, append_stdout=False,
                     verbose=False, **kwargs):
    """
    Return the output of BNG's generate_network command for a model.

    The result is a BNGL model definition ('net' file contents) with the
    additional sections 'reactions' and 'groups', and with the 'species'
    section expanded to contain every possible species.

    Parameters
    ----------
    model : Model
        Model to pass to generate_network.
    cleanup : bool, optional
        If True (default), delete the temporary files after the run
        finishes. If False, leave them in place. Useful for debugging.
    append_stdout : bool, optional
        This option is no longer supported and has been left here for API
        compatibility reasons.
    verbose : bool or int, optional (default: False)
        Sets the verbosity level of the logger. Integers are interpreted
        as Python logging levels; False means the PySB default level
        (currently WARNING), True means DEBUG.
    """
    # Forward debug output to the BNG action only when verbosity is at
    # least EXTENDED_DEBUG (or `verbose` is literally True).
    if isinstance(verbose, bool):
        action_debug = verbose
    else:
        action_debug = verbose <= EXTENDED_DEBUG
    with BngFileInterface(model, verbose=verbose, cleanup=cleanup) as bng:
        bng._logger.info('Generating reaction network')
        bng.action('generate_network', overwrite=True,
                   verbose=action_debug, **kwargs)
        bng.execute()
        # Returning inside the with-block still runs the cleanup handler.
        return bng.read_netfile()
def load_equations(model, netfile):
    """
    Load model equations from a previously saved BNG netfile.

    Useful for large models where BioNetGen network generation takes a
    long time: the .net file can be saved once and re-loaded here later.

    Parameters
    ----------
    model : pysb.Model
        Model whose equations should be populated.
    netfile : str
        Path to the BNG net file.
    """
    # A model whose reactions are already populated needs no reload.
    if not model.reactions:
        with open(netfile, 'r') as net_fh:
            _parse_netfile(model, net_fh)
def generate_equations(model, cleanup=True, verbose=False, **kwargs):
    """
    Generate math expressions for reaction rates and species in a model.

    This fills in the following pieces of the model:

    * species
    * reactions
    * reactions_bidirectional
    * observables (just `coefficients` and `species` fields for each element)

    Parameters
    ----------
    model : Model
        Model to pass to generate_network.
    cleanup : bool, optional
        If True (default), delete the temporary files after the run
        finishes. If False, leave them in place. Useful for debugging.
    verbose : bool or int, optional (default: False)
        Sets the verbosity level of the logger. Integers are interpreted
        as Python logging levels; False means the PySB default level
        (currently WARNING), True means DEBUG.
    """
    # Only generate once per model.
    # TODO track "dirty" state, i.e. "has model been modified?"
    # or, use a separate "math model" object to contain ODEs
    if not model.reactions:
        net_text = generate_network(model, cleanup=cleanup,
                                    verbose=verbose, **kwargs)
        _parse_netfile(model, iter(net_text.split('\n')))
def _parse_netfile(model, lines):
    """Parse species, rxns and groups from a BNGL net file.

    `lines` must be an iterator of lines. Sections are consumed in the
    fixed order BNG emits them: parameters, species, reactions, groups.
    Running off the end of the file simply ends parsing (StopIteration is
    swallowed), so a file missing trailing sections is tolerated.
    """
    try:
        # Skip ahead to each 'begin <section>' marker, then hand every line
        # up to the matching 'end <section>' to the section parser.
        while 'begin parameters' not in next(lines):
            pass
        while True:
            line = next(lines)
            if 'end parameters' in line:
                break
            _parse_parameter(model, line)

        while 'begin species' not in next(lines):
            pass
        model.species = []
        while True:
            line = next(lines)
            if 'end species' in line:
                break
            _parse_species(model, line)

        while 'begin reactions' not in next(lines):
            pass
        # Cache maps (reactants, products) -> bidirectional reaction dict so
        # forward/reverse pairs can be merged as they are encountered.
        reaction_cache = {}
        while True:
            line = next(lines)
            if 'end reactions' in line:
                break
            _parse_reaction(model, line, reaction_cache=reaction_cache)

        # fix up reactions whose reverse version we saw first
        for r in model.reactions_bidirectional:
            if all(r['reverse']):
                r['reactants'], r['products'] = r['products'], r['reactants']
                r['rate'] *= -1
            # now the 'reverse' value is no longer needed
            del r['reverse']

        while 'begin groups' not in next(lines):
            pass
        while True:
            line = next(lines)
            if 'end groups' in line:
                break
            _parse_group(model, line)
    except StopIteration:
        # End of file reached; whatever sections were present are parsed.
        # (Previously bound the exception to an unused name `e`.)
        pass
def _parse_parameter(model, line):
    """Parse a 'parameters' line from a BNGL net file.

    Parameters that BNG derived itself (i.e. not already components of the
    model) are recorded in the model's derived parameter/expression sets.
    Raises ValueError for an unrecognized parameter type.
    """
    _, pname, pval, _, ptype = line.strip().split()
    if pname in model.components.keys():
        # Already an explicit model component; nothing to derive.
        return
    # Need to parse the value even for constants, since BNG considers some
    # expressions to be "Constant", e.g. "2^2"
    parsed_expr = parse_bngl_expr(pval, local_dict={
        e.name: e for e in model.parameters | model.expressions})
    if ptype == 'Constant':
        if pname not in model._derived_parameters.keys():
            p = pysb.core.Parameter(pname, parsed_expr, _export=False)
            model._derived_parameters.add(p)
    elif ptype == 'ConstantExpression':
        if pname not in model._derived_expressions.keys():
            p = pysb.core.Expression(pname, parsed_expr, _export=False)
            model._derived_expressions.add(p)
    else:
        # BUGFIX: previously a Constant/ConstantExpression whose name was
        # already recorded fell through to this branch and raised a
        # misleading "Unknown type" error; now only genuinely unknown
        # types reach here (already-recorded names are skipped above).
        raise ValueError('Unknown type {} for parameter {}'.format(
            ptype, pname))
def _parse_species(model, line):
    """Parse a 'species' line from a BNGL net file.

    Builds a ComplexPattern for the species and appends it to
    model.species. The line format is: index, species string, value; the
    species string is dot-separated monomer patterns, optionally prefixed
    with a species-level compartment (``@comp::``).
    """
    index, species, value = line.strip().split()
    # Optional '@compartment::' prefix applies to the whole species.
    species_compartment_name, complex_string = \
        re.match(r'(?:@(\w+)::)?(.*)', species).groups()
    species_compartment = model.compartments.get(species_compartment_name)
    monomer_strings = complex_string.split('.')
    monomer_patterns = []
    for ms in monomer_strings:
        # Each monomer looks like Name(sites)[@compartment]; a leading '$'
        # (fixed-concentration marker) is tolerated and discarded.
        monomer_name, site_strings, monomer_compartment_name = \
            re.match(r'\$?(\w+)\(([^)]*)\)(?:@(\w+))?', ms).groups()
        site_conditions = collections.defaultdict(list)
        if len(site_strings):
            for ss in site_strings.split(','):
                # FIXME this should probably be done with regular expressions
                if '!' in ss and '~' in ss:
                    # Site with both a state ('~') and a bond ('!').
                    site_name, condition = ss.split('~')
                    state, bond = condition.split('!')
                    if bond == '?':
                        bond = pysb.core.WILD
                    elif bond == '!':
                        bond = pysb.core.ANY
                    else:
                        bond = int(bond)
                    condition = (state, bond)
                elif '!' in ss:
                    # Bond only; multiple '!'s denote multiple bonds.
                    site_name, condition = ss.split('!', 1)
                    if '!' in condition:
                        condition = [int(c) for c in condition.split('!')]
                    else:
                        condition = int(condition)
                elif '~' in ss:
                    # State only.
                    site_name, condition = ss.split('~')
                else:
                    # Unbound, stateless site.
                    site_name, condition = ss, None
                site_conditions[site_name].append(condition)
        # A site seen more than once becomes a MultiState of its conditions.
        site_conditions = {k: v[0] if len(v) == 1 else pysb.core.MultiState(*v)
                           for k, v in site_conditions.items()}
        monomer = model.monomers[monomer_name]
        monomer_compartment = model.compartments.get(monomer_compartment_name)
        # Compartment prefix notation in BNGL means "assign this compartment to
        # all molecules without their own explicit compartment".
        compartment = monomer_compartment or species_compartment
        mp = pysb.core.MonomerPattern(monomer, site_conditions, compartment)
        monomer_patterns.append(mp)
    cp = pysb.core.ComplexPattern(monomer_patterns, None)
    model.species.append(cp)
def _parse_reaction(model, line, reaction_cache):
    """Parse a 'reaction' line from a BNGL net file.

    Appends the reaction to model.reactions and merges it into
    model.reactions_bidirectional. `reaction_cache` is a dict keyed by
    (reactants, products) used to pair a reaction with its reverse partner
    as lines are encountered.
    """
    (number, reactants, products, rate, rule) = line.strip().split(' ', 4)
    # the -1 is to switch from one-based to zero-based indexing
    reactants = tuple(int(r) - 1 for r in reactants.split(',') if r != '0')
    products = tuple(int(p) - 1 for p in products.split(',') if p != '0')
    rate = rate.rsplit('*')
    (rule_list, unit_conversion) = re.match(
        r'#([\w,\(\)]+)(?: unit_conversion=(.*))?\s*$',
        rule).groups()
    rule_list = rule_list.split(',')  # BNG lists all rules that generate a rxn
    # Support new (BNG 2.2.6-stable or greater) and old BNG naming convention
    # for reverse rules.
    # BUGFIX: the pattern is now a raw string; the previous plain string
    # contained the invalid escape sequence '\(' (a warning in CPython 3.6+,
    # a SyntaxWarning in 3.12+).
    rule_name, is_reverse = zip(*[re.subn(r'^_reverse_|\(reverse\)$', '', r)
                                  for r in rule_list])
    is_reverse = tuple(bool(i) for i in is_reverse)
    r_names = ['__s%d' % r for r in reactants]
    # Each rate factor is resolved against the model's (derived)
    # parameters/expressions, falling back to a plain float literal.
    rate_param = [model.parameters.get(r) or model.expressions.get(r) or
                  model._derived_parameters.get(r) or
                  model._derived_expressions.get(r) or float(r) for r in rate]
    combined_rate = sympy.Mul(*[sympy.S(t) for t in r_names + rate_param])
    reaction = {
        'reactants': reactants,
        'products': products,
        'rate': combined_rate,
        'rule': rule_name,
        'reverse': is_reverse,
    }
    model.reactions.append(reaction)
    # bidirectional reactions
    key = (reactants, products)
    key_reverse = (products, reactants)
    if key in reaction_cache:
        # Same direction seen before: accumulate rate and new rule names.
        reaction_bd = reaction_cache.get(key)
        reaction_bd['rate'] += combined_rate
        reaction_bd['rule'] += tuple(r for r in rule_name if r not in
                                     reaction_bd['rule'])
    elif key_reverse in reaction_cache:
        # Reverse direction seen before: subtract rate, mark reversible.
        reaction_bd = reaction_cache.get(key_reverse)
        reaction_bd['reversible'] = True
        reaction_bd['rate'] -= combined_rate
        reaction_bd['rule'] += tuple(r for r in rule_name if r not in
                                     reaction_bd['rule'])
    else:
        # make a copy of the reaction dict
        reaction_bd = dict(reaction)
        # default to false until we find a matching reverse reaction
        reaction_bd['reversible'] = False
        reaction_cache[key] = reaction_bd
        model.reactions_bidirectional.append(reaction_bd)
def _parse_group(model, line):
"""Parse a 'group' line from a BNGL net file."""
# values are number (which we ignore), name, and species list
values = line.strip().split()
obs = model.observables[values[1]]
if len(values) == 3:
# combination is a comma separated list of [coeff*]speciesnumber
for product in values[2].split(','):
terms = product.split('*')
# if no coeff given (just species), insert a coeff of 1
if len(terms) == 1:
terms.insert(0, 1)
obs.coefficients.append(int(terms[0]))
# -1 to change to 0-based indexing
obs.species.append(int(terms[1]) - 1)
def _convert_tokens(tokens, local_dict, global_dict):
for pos, tok in enumerate(tokens):
if tok == (tokenize.NAME, 'if'):
tokens[pos] = (tokenize.NAME, '__bngl_if')
local_dict['__bngl_if'] = sympy.Function('bngl_if')
elif tok == (tokenize.OP, '^'):
tokens[pos] = (tokenize.OP, '**')
elif tok == (tokenize.NAME, 'and'):
tokens[pos] = (tokenize.OP, '&')
elif tok == (tokenize.NAME, 'or'):
tokens[pos] = (tokenize.OP, '|')
return tokens
def _is_bool_expr(e):
    """Return True if `e` is a boolean-valued sympy expression.

    Checks for the sympy Boolean interface while excluding AtomicExpr
    subclasses (presumably to reject atoms like Symbol that implement
    Boolean but are not genuine boolean expressions -- TODO confirm).
    """
    return isinstance(e, Boolean) and not isinstance(e, sympy.AtomicExpr)
def _fix_boolean_multiplication(*args):
    """Rebuild a sympy Mul, converting boolean factors to 0/1 Piecewise.

    Each boolean argument becomes Piecewise((1, arg), (0, True)) so that
    expressions like ``2 * (3 > 0)`` multiply out numerically.
    """
    converted = []
    for arg in args:
        if _is_bool_expr(arg):
            arg = sympy.Piecewise((1, arg), (0, True))
        converted.append(arg)
    return sympy.Mul(*converted)
def parse_bngl_expr(text, *args, **kwargs):
    """Convert a BNGL math expression string to a sympy Expr.

    Extra positional/keyword arguments are forwarded to
    ``sympy_parser.parse_expr`` (e.g. ``local_dict``). Raises ValueError
    for expressions referencing simulation time, which are unsupported.
    """
    # Translate a few operators with simple text replacement.
    # Empty call parentheses are dropped; '==' is reduced to '=' so the
    # convert_equals_signs transformation below can turn it into Eq().
    text = text.replace('()', '')
    text = text.replace('==', '=')
    # Use sympy to parse the text into an Expr.
    # _convert_tokens handles '^', 'and'/'or' and the BNGL 'if' function.
    trans = (
        sympy_parser.standard_transformations
        + (sympy_parser.convert_equals_signs, _convert_tokens)
    )
    # evaluate=False keeps the parsed structure intact for the replace
    # passes below.
    expr = sympy_parser.parse_expr(text, *args, transformations=trans,
                                   evaluate=False, **kwargs)
    # Replace Boolean multiplications, e.g. `2 * (3 > 0)`
    # See https://github.com/pysb/pysb/pull/494
    expr = expr.replace(sympy.Mul, _fix_boolean_multiplication)
    # Transforming 'if' to Piecewise requires subexpression rearrangement, so we
    # use sympy's replace functionality rather than attempt it using text
    # replacements above.
    expr = expr.replace(
        sympy.Function('bngl_if'),
        lambda cond, t, f: sympy.Piecewise((t, cond), (f, True))
    )
    # Check for unsupported constructs.
    if expr.has(sympy.Symbol('time')):
        raise ValueError(
            "Expressions referencing simulation time are not supported"
        )
    return expr
class NoInitialConditionsError(RuntimeError):
    """Raised when a model has no initial conditions and no zero-order
    synthesis rules, i.e. no species can ever be populated."""

    def __init__(self):
        message = ("Model has no initial conditions or "
                   "zero-order synthesis rules")
        super(NoInitialConditionsError, self).__init__(message)
class NoRulesError(RuntimeError):
    """Raised when a model defines no rules."""

    def __init__(self):
        super(NoRulesError, self).__init__("Model has no rules")
| bsd-2-clause |
mosra/m.css | documentation/test_python/test_page.py | 1 | 4453 | #
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020 Vladimír Vondruš <mosra@centrum.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import matplotlib
import os
import re
import subprocess
from distutils.version import LooseVersion
from . import BaseTestCase
def dot_version():
    """Return the Graphviz ``dot`` version string, e.g. ``"2.43.0"``.

    Runs ``dot -V`` and extracts the dotted version number from its
    banner. Raises if ``dot`` is not installed or the banner changes shape.
    """
    # dot prints its version banner to stderr, hence the STDOUT redirect.
    banner = subprocess.check_output(
        ['dot', '-V'], stderr=subprocess.STDOUT).decode('utf-8').strip()
    # BUGFIX: raw string -- the previous plain string relied on the invalid
    # escape sequence '\d' (DeprecationWarning, SyntaxWarning in 3.12+).
    return re.match(r".*version (?P<version>\d+\.\d+\.\d+).*",
                    banner).group('version')
class Page(BaseTestCase):
    """Render a set of standalone pages and compare them to the expected
    HTML output, including the generated page index."""

    def test(self):
        self.run_python({
            'INPUT_PAGES': ['index.rst', 'another.rst', 'error.rst']
        })
        # Every generated file must match its expected counterpart,
        # including the page listing produced alongside the inputs.
        for filename in ('index.html', 'another.html', 'error.html',
                         'pages.html'):
            self.assertEqual(*self.actual_expected_contents(filename))
class InputSubdir(BaseTestCase):
    """Verify that INPUT can point at a subdirectory of the test dir."""

    def test(self):
        self.run_python({
            'INPUT': 'sub',
            'INPUT_PAGES': ['index.rst']
        })
        # The same output as Page, just the file is taken from elsewhere
        self.assertEqual(*self.actual_expected_contents('index.html', '../page/index.html'))
class Plugins(BaseTestCase):
    """Exercise every bundled plugin (plus a custom one) to check that
    registration works and the rendered output matches expectations."""

    def test(self):
        self.run_python({
            # Test all of them to check the registration works well
            'PLUGINS': [
                'm.abbr',
                'm.code',
                'm.components',
                'm.dot',
                'm.dox',
                'm.gh',
                'm.gl',
                'm.images',
                'm.link',
                'm.plots',
                'm.vk',
                'fancyline'
            ],
            'PLUGIN_PATHS': ['plugins'],
            'INPUT_PAGES': ['index.rst', 'dot.rst', 'plots.rst'],
            'M_HTMLSANITY_SMART_QUOTES': True,
            'M_DOT_FONT': 'DejaVu Sans',
            'M_PLOTS_FONT': 'DejaVu Sans',
            'M_DOX_TAGFILES': [
                (os.path.join(self.path, '../../../doc/documentation/corrade.tag'), 'https://doc.magnum.graphics/corrade/')
            ]
        })
        self.assertEqual(*self.actual_expected_contents('index.html'))

        # The output is different for every other Graphviz. Query the
        # version only once -- dot_version() spawns a `dot -V` subprocess
        # on every call (previously it was invoked up to three times).
        dot_ver = LooseVersion(dot_version())
        if dot_ver >= LooseVersion("2.44.0"):
            file = 'dot.html'
        elif dot_ver > LooseVersion("2.40.0"):
            file = 'dot-240.html'
        elif dot_ver >= LooseVersion("2.38.0"):
            file = 'dot-238.html'
        else:
            # BUGFIX: older Graphviz used to fall through with `file`
            # unbound and die with a NameError; skip explicitly instead.
            self.skipTest("Graphviz {} is too old".format(dot_ver))
        self.assertEqual(*self.actual_expected_contents('dot.html', file))

        # I assume this will be a MASSIVE ANNOYANCE at some point as well so
        # keeping it separate
        self.assertEqual(*self.actual_expected_contents('plots.html'))
        self.assertTrue(os.path.exists(os.path.join(self.path, 'output/tiny.png')))

        import fancyline
        self.assertEqual(fancyline.post_crawl_call_count, 1)
        self.assertEqual(fancyline.scope_stack, [])

        # No code, thus no docstrings processed
        self.assertEqual(fancyline.docstring_call_count, 0)

        # Once for each page, but none for render_docs() as that shouldn't
        # generate any output anyway
        self.assertEqual(fancyline.pre_page_call_count, 3)
        self.assertEqual(fancyline.post_run_call_count, 1)
| mit |
ganeshmurthy/qpid-dispatch | tests/system_tests_qdstat.py | 2 | 39960 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import re
import system_test
import unittest
from subprocess import PIPE
from proton import Url, SSLDomain, SSLUnavailable, SASL
from system_test import main_module, SkipIfNeeded
from proton.utils import BlockingConnection
class QdstatTest(system_test.TestCase):
    """Test qdstat tool output against a single standalone router."""

    @classmethod
    def setUpClass(cls):
        # One standalone router, single worker thread, one normal listener.
        super(QdstatTest, cls).setUpClass()
        config = system_test.Qdrouterd.Config([
            ('router', {'id': 'QDR.A', 'workerThreads': 1}),
            ('listener', {'port': cls.tester.get_port()}),
        ])
        cls.router = cls.tester.qdrouterd('test-router', config)

    def run_qdstat(self, args, regexp=None, address=None):
        """Run qdstat with `args` against the router, assert a zero exit
        status and (optionally) that `regexp` matches the output
        case-insensitively; return the full stdout text."""
        if args:
            popen_args = ['qdstat', '--bus', str(address or self.router.addresses[0]), '--timeout', str(system_test.TIMEOUT)] + args
        else:
            popen_args = ['qdstat', '--bus',
                          str(address or self.router.addresses[0]),
                          '--timeout', str(system_test.TIMEOUT)]

        p = self.popen(popen_args,
                       name='qdstat-' + self.id(), stdout=PIPE, expect=None,
                       universal_newlines=True)

        out = p.communicate()[0]
        assert p.returncode == 0, \
            "qdstat exit status %s, output:\n%s" % (p.returncode, out)
        if regexp:
            assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out)
        return out

    def test_help(self):
        self.run_qdstat(['--help'], r'Usage: qdstat')

    def test_general(self):
        out = self.run_qdstat(['--general'], r'(?s)Router Statistics.*Mode\s*Standalone')
        self.assertTrue(re.match(r"(.*)\bConnections\b[ \t]+\b1\b(.*)",
                                 out, flags=re.DOTALL) is not None, out)
        self.assertTrue(re.match(r"(.*)\bNodes\b[ \t]+\b0\b(.*)",
                                 out, flags=re.DOTALL) is not None, out)
        self.assertTrue(re.match(r"(.*)\bAuto Links\b[ \t]+\b0\b(.*)",
                                 out, flags=re.DOTALL) is not None, out)
        self.assertTrue(re.match(r"(.*)\bLink Routes\b[ \t]+\b0\b(.*)",
                                 out, flags=re.DOTALL) is not None, out)
        self.assertTrue(re.match(r"(.*)\bWorker Threads\b[ \t]+\b1\b(.*)",
                                 out, flags=re.DOTALL) is not None, out)
        self.assertTrue(re.match(r"(.*)\bRouter Id\b[ \t]+\bQDR.A\b(.*)",
                                 out, flags=re.DOTALL) is not None, out)
        self.assertTrue(re.match(r"(.*)\bMode\b[ \t]+\bstandalone\b(.*)",
                                 out, flags=re.DOTALL) is not None, out)
        self.assertEqual(out.count("QDR.A"), 2)

    def test_general_csv(self):
        out = self.run_qdstat(['--general', '--csv'], r'(?s)Router Statistics.*Mode","Standalone')
        self.assertIn('"Connections","1"', out)
        self.assertIn('"Worker Threads","1"', out)
        self.assertIn('"Nodes","0"', out)
        self.assertIn('"Auto Links","0"', out)
        self.assertIn('"Link Routes","0"', out)
        self.assertIn('"Router Id","QDR.A"', out)
        self.assertIn('"Mode","standalone"', out)
        self.assertEqual(out.count("QDR.A"), 2)

    def test_connections(self):
        self.run_qdstat(['--connections'], r'host.*container.*role')
        outs = self.run_qdstat(['--connections'], 'no-auth')
        outs = self.run_qdstat(['--connections'], 'QDR.A')

    def test_connections_csv(self):
        self.run_qdstat(['--connections', "--csv"], r'host.*container.*role')
        # BUGFIX: these two checks previously ran without --csv and so
        # duplicated test_connections instead of exercising CSV output.
        outs = self.run_qdstat(['--connections', '--csv'], 'no-auth')
        outs = self.run_qdstat(['--connections', '--csv'], 'QDR.A')

    def test_links(self):
        self.run_qdstat(['--links'], r'QDR.A')
        out = self.run_qdstat(['--links'], r'endpoint.*out.*local.*temp.')
        parts = out.split("\n")
        self.assertEqual(len(parts), 9)

    def test_links_csv(self):
        self.run_qdstat(['--links', "--csv"], r'QDR.A')
        # BUGFIX: previously ran without --csv, duplicating test_links.
        out = self.run_qdstat(['--links', '--csv'], r'endpoint.*out.*local.*temp.')
        parts = out.split("\n")
        # NOTE(review): CSV output has one line fewer than the tabular form
        # (cf. test_links_with_limit vs test_links_with_limit_csv: 8 vs 7)
        # -- confirm against an actual run.
        self.assertEqual(len(parts), 8)

    def test_links_with_limit(self):
        out = self.run_qdstat(['--links', '--limit=1'])
        parts = out.split("\n")
        self.assertEqual(len(parts), 8)

    def test_links_with_limit_csv(self):
        out = self.run_qdstat(['--links', '--limit=1', "--csv"])
        parts = out.split("\n")
        self.assertEqual(len(parts), 7)

    def test_nodes(self):
        self.run_qdstat(['--nodes'], r'No Router List')

    def test_nodes_csv(self):
        self.run_qdstat(['--nodes', "--csv"], r'No Router List')

    def test_address(self):
        out = self.run_qdstat(['--address'], r'QDR.A')
        out = self.run_qdstat(['--address'], r'\$management')
        parts = out.split("\n")
        self.assertEqual(len(parts), 11)

    def test_address_csv(self):
        # BUGFIX: previously neither call passed --csv, duplicating
        # test_address instead of exercising CSV output.
        out = self.run_qdstat(['--address', '--csv'], r'QDR.A')
        out = self.run_qdstat(['--address', '--csv'], r'\$management')
        parts = out.split("\n")
        # NOTE(review): CSV output has one line fewer than the tabular form
        # (cf. test_address_with_limit vs test_address_with_limit_csv)
        # -- confirm against an actual run.
        self.assertEqual(len(parts), 10)

    def test_qdstat_no_args(self):
        outs = self.run_qdstat(args=None)
        self.assertIn("Presettled Count", outs)
        self.assertIn("Dropped Presettled Count", outs)
        self.assertIn("Accepted Count", outs)
        self.assertIn("Rejected Count", outs)
        self.assertIn("Deliveries from Route Container", outs)
        self.assertIn("Deliveries to Route Container", outs)
        self.assertIn("Deliveries to Fallback", outs)
        self.assertIn("Egress Count", outs)
        self.assertIn("Ingress Count", outs)
        self.assertIn("Uptime", outs)

    def test_qdstat_no_other_args_csv(self):
        outs = self.run_qdstat(["--csv"])
        self.assertIn("Presettled Count", outs)
        self.assertIn("Dropped Presettled Count", outs)
        self.assertIn("Accepted Count", outs)
        self.assertIn("Rejected Count", outs)
        self.assertIn("Deliveries from Route Container", outs)
        self.assertIn("Deliveries to Route Container", outs)
        self.assertIn("Deliveries to Fallback", outs)
        self.assertIn("Egress Count", outs)
        self.assertIn("Ingress Count", outs)
        self.assertIn("Uptime", outs)

    def test_address_priority(self):
        out = self.run_qdstat(['--address'])
        lines = out.split("\n")

        # make sure the output contains a header line
        self.assertTrue(len(lines) >= 2)

        # see if the header line has the word priority in it
        priorityregexp = r'pri'
        priority_column = re.search(priorityregexp, lines[4]).start()
        self.assertTrue(priority_column > -1)

        # extract the number in the priority column of every address
        for i in range(6, len(lines) - 1):
            pri = re.findall(r'[-\d]+', lines[i][priority_column:])

            # make sure the priority found is a hyphen or a legal number
            if pri[0] == '-':
                pass  # naked hypnen is allowed
            else:
                self.assertTrue(len(pri) > 0, "Can not find numeric priority in '%s'" % lines[i])
                priority = int(pri[0])
                # make sure the priority is from -1 to 9
                self.assertTrue(priority >= -1, "Priority was less than -1")
                self.assertTrue(priority <= 9, "Priority was greater than 9")

    def test_address_priority_csv(self):
        HEADER_ROW = 4
        PRI_COL = 4

        out = self.run_qdstat(['--address', "--csv"])
        lines = out.split("\n")

        # make sure the output contains a header line
        self.assertTrue(len(lines) >= 2)

        # see if the header line has the word priority in it
        header_line = lines[HEADER_ROW].split(',')
        self.assertTrue(header_line[PRI_COL] == '"pri"')

        # extract the number in the priority column of every address
        for i in range(HEADER_ROW + 1, len(lines) - 1):
            line = lines[i].split(',')
            pri = line[PRI_COL][1:-1]  # unquoted value

            # make sure the priority found is a hyphen or a legal number
            if pri == '-':
                pass  # naked hypnen is allowed
            else:
                priority = int(pri)
                # make sure the priority is from -1 to 9
                self.assertTrue(priority >= -1, "Priority was less than -1")
                self.assertTrue(priority <= 9, "Priority was greater than 9")

    def test_address_with_limit(self):
        out = self.run_qdstat(['--address', '--limit=1'])
        parts = out.split("\n")
        self.assertEqual(len(parts), 8)

    def test_address_with_limit_csv(self):
        out = self.run_qdstat(['--address', '--limit=1', '--csv'])
        parts = out.split("\n")
        self.assertEqual(len(parts), 7)

    def test_memory(self):
        out = self.run_qdstat(['--memory'])
        if out.strip() == "No memory statistics available":
            # router built w/o memory pools enabled]
            return self.skipTest("Router's memory pools disabled")
        self.assertIn("QDR.A", out)
        self.assertIn("UTC", out)
        regexp = r'qdr_address_t\s+[0-9]+'
        assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out)

    def test_memory_csv(self):
        out = self.run_qdstat(['--memory', '--csv'])
        if out.strip() == "No memory statistics available":
            # router built w/o memory pools enabled]
            return self.skipTest("Router's memory pools disabled")
        self.assertIn("QDR.A", out)
        self.assertIn("UTC", out)
        regexp = r'qdr_address_t","[0-9]+'
        assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out)

    def test_policy(self):
        out = self.run_qdstat(['--policy'])
        self.assertIn("Maximum Concurrent Connections", out)
        self.assertIn("Total Denials", out)

    def test_policy_csv(self):
        out = self.run_qdstat(['-p', "--csv"])
        self.assertIn("Maximum Concurrent Connections", out)
        self.assertIn("Total Denials", out)

    def test_log(self):
        self.run_qdstat(['--log', '--limit=5'], r'AGENT \(debug\).*GET-LOG')

    def test_yy_query_many_links(self):
        # This test will fail without the fix for DISPATCH-974
        c = BlockingConnection(self.router.addresses[0])
        count = 0
        links = []
        COUNT = 5000

        ADDRESS_SENDER = "examples-sender"
        ADDRESS_RECEIVER = "examples-receiver"

        # This loop creates 5000 consumer and 5000 producer links
        while True:
            count += 1
            r = c.create_receiver(ADDRESS_RECEIVER + str(count))
            links.append(r)
            s = c.create_sender(ADDRESS_SENDER + str(count))
            # BUGFIX: previously appended the connection `c` instead of the
            # sender link `s`; the list exists to keep the links alive.
            links.append(s)
            if count == COUNT:
                break

        # Now we run qdstat command and check if we got back details
        # about all the 10000 links
        # We do not specify the limit which means unlimited
        # which means get everything that is there.
        outs = self.run_qdstat(['--links'])
        out_list = outs.split("\n")

        out_links = 0
        in_links = 0
        for out in out_list:
            if "endpoint  in" in out and ADDRESS_SENDER in out:
                in_links += 1
            if "endpoint  out" in out and ADDRESS_RECEIVER in out:
                out_links += 1

        self.assertEqual(in_links, COUNT)
        self.assertEqual(out_links, COUNT)

        # Run qdstat with a limit more than 10,000
        outs = self.run_qdstat(['--links', '--limit=15000'])
        out_list = outs.split("\n")

        out_links = 0
        in_links = 0
        for out in out_list:
            if "endpoint  in" in out and ADDRESS_SENDER in out:
                in_links += 1
            if "endpoint  out" in out and ADDRESS_RECEIVER in out:
                out_links += 1

        self.assertEqual(in_links, COUNT)
        self.assertEqual(out_links, COUNT)

        # Run qdstat with a limit less than 10,000
        outs = self.run_qdstat(['--links', '--limit=2000'])
        out_list = outs.split("\n")

        links = 0
        for out in out_list:
            if "endpoint" in out and "examples" in out:
                links += 1

        self.assertEqual(links, 2000)

        # Run qdstat with a limit less than 10,000
        # repeat with --csv
        outs = self.run_qdstat(['--links', '--limit=2000', '--csv'])
        out_list = outs.split("\n")

        links = 0
        for out in out_list:
            if "endpoint" in out and "examples" in out:
                links += 1

        self.assertEqual(links, 2000)

        # Run qdstat with a limit of 700 because 700
        # is the maximum number of rows we get per request
        outs = self.run_qdstat(['--links', '--limit=700'])
        out_list = outs.split("\n")

        links = 0
        for out in out_list:
            if "endpoint" in out and "examples" in out:
                links += 1

        self.assertEqual(links, 700)

        # Run qdstat with a limit of 700 because 700
        # is the maximum number of rows we get per request
        # repeat with --csv
        outs = self.run_qdstat(['--links', '--limit=700', '--csv'])
        out_list = outs.split("\n")

        links = 0
        for out in out_list:
            if "endpoint" in out and "examples" in out:
                links += 1

        self.assertEqual(links, 700)

        # Run qdstat with a limit of 500 because 700
        # is the maximum number of rows we get per request
        # and we want to try something less than 700
        outs = self.run_qdstat(['--links', '--limit=500'])
        out_list = outs.split("\n")

        links = 0
        for out in out_list:
            if "endpoint" in out and "examples" in out:
                links += 1

        self.assertEqual(links, 500)

        # Run qdstat with a limit of 500 because 700
        # is the maximum number of rows we get per request
        # and we want to try something less than 700
        # repeat with --csv
        outs = self.run_qdstat(['--links', '--limit=500', '--csv'])
        out_list = outs.split("\n")

        links = 0
        for out in out_list:
            if "endpoint" in out and "examples" in out:
                links += 1

        self.assertEqual(links, 500)

        # DISPATCH-1485. Try to run qdstat with a limit=0. Without the fix for DISPATCH-1485
        # this following command will hang and the test will fail.
        outs = self.run_qdstat(['--links', '--limit=0'])
        out_list = outs.split("\n")

        links = 0
        for out in out_list:
            if "endpoint" in out and "examples" in out:
                links += 1

        self.assertEqual(links, COUNT * 2)

        # DISPATCH-1485. Try to run qdstat with a limit=0. Without the fix for DISPATCH-1485
        # this following command will hang and the test will fail.
        # repeat with --csv
        outs = self.run_qdstat(['--links', '--limit=0', '--csv'])
        out_list = outs.split("\n")

        links = 0
        for out in out_list:
            if "endpoint" in out and "examples" in out:
                links += 1

        self.assertEqual(links, COUNT * 2)

        # This test would fail without the fix for DISPATCH-974
        outs = self.run_qdstat(['--address'])
        out_list = outs.split("\n")

        sender_addresses = 0
        receiver_addresses = 0
        for out in out_list:
            if ADDRESS_SENDER in out:
                sender_addresses += 1
            if ADDRESS_RECEIVER in out:
                receiver_addresses += 1

        self.assertEqual(sender_addresses, COUNT)
        self.assertEqual(receiver_addresses, COUNT)

        # Test if there is a non-zero uptime for the router in the output of
        # qdstat -g
        non_zero_seconds = False
        outs = self.run_qdstat(args=None)
        parts = outs.split("\n")
        for part in parts:
            if "Uptime" in part:
                uptime_parts = part.split(" ")
                for uptime_part in uptime_parts:
                    if uptime_part.startswith("000"):
                        time_parts = uptime_part.split(":")
                        if int(time_parts[3]) > 0:
                            non_zero_seconds = True
                        if not non_zero_seconds:
                            if int(time_parts[2]) > 0:
                                non_zero_seconds = True
        self.assertTrue(non_zero_seconds)

        c.close()
class QdstatTestVhostPolicy(system_test.TestCase):
    """Test qdstat-with-policy tool output.

    Runs a standalone router with vhost policy enabled (one '$default'
    vhost with two user groups) and checks the --vhosts / --vhostgroups /
    --vhoststats views of qdstat.
    """
    @classmethod
    def setUpClass(cls):
        # Router with a global policy plus a '$default' vhost defining a
        # wildcard user group and a named 'HGCrawler' group.
        super(QdstatTestVhostPolicy, cls).setUpClass()
        config = system_test.Qdrouterd.Config([
            ('router', {'id': 'QDR.A', 'workerThreads': 1}),
            ('listener', {'port': cls.tester.get_port()}),
            ('policy', {'maxConnections': 100, 'enableVhostPolicy': 'true'}),
            ('vhost', {
                'hostname': '$default',
                'maxConnections': 2,
                'allowUnknownUser': 'true',
                'groups': {
                    '$default': {
                        'users': '*',
                        'remoteHosts': '*',
                        'sources': '*',
                        'targets': '*',
                        'allowDynamicSource': True
                    },
                    'HGCrawler': {
                        'users': 'Farmers',
                        'remoteHosts': '*',
                        'sources': '*',
                        'targets': '*',
                        'allowDynamicSource': True
                    },
                },
            })
        ])
        cls.router = cls.tester.qdrouterd('test-router', config)

    def run_qdstat(self, args, regexp=None, address=None):
        # Run qdstat with `args`; assert zero exit status and, when given,
        # a case-insensitive `regexp` match; return the stdout text.
        if args:
            popen_args = ['qdstat', '--bus', str(address or self.router.addresses[0]), '--timeout', str(system_test.TIMEOUT)] + args
        else:
            popen_args = ['qdstat', '--bus',
                          str(address or self.router.addresses[0]),
                          '--timeout', str(system_test.TIMEOUT)]

        p = self.popen(popen_args,
                       name='qdstat-' + self.id(), stdout=PIPE, expect=None,
                       universal_newlines=True)

        out = p.communicate()[0]
        assert p.returncode == 0, \
            "qdstat exit status %s, output:\n%s" % (p.returncode, out)
        if regexp:
            assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out)
        return out

    def test_vhost(self):
        out = self.run_qdstat(['--vhosts'])
        self.assertIn("Vhosts", out)
        self.assertIn("allowUnknownUser", out)

    def test_vhost_csv(self):
        out = self.run_qdstat(['--vhosts', '--csv'])
        self.assertIn("Vhosts", out)
        self.assertIn("allowUnknownUser", out)

    def test_vhostgroups(self):
        out = self.run_qdstat(['--vhostgroups'])
        self.assertIn("Vhost Groups", out)
        self.assertIn("allowAdminStatusUpdate", out)
        self.assertIn("Vhost '$default' UserGroup '$default'", out)
        self.assertIn("Vhost '$default' UserGroup 'HGCrawler'", out)

    def test_vhostgroups_csv(self):
        out = self.run_qdstat(['--vhostgroups', '--csv'])
        self.assertIn("Vhost Groups", out)
        self.assertIn("allowAdminStatusUpdate", out)
        self.assertIn("Vhost '$default' UserGroup '$default'", out)
        self.assertIn("Vhost '$default' UserGroup 'HGCrawler'", out)

    def test_vhoststats(self):
        out = self.run_qdstat(['--vhoststats'])
        self.assertIn("Vhost Stats", out)
        self.assertIn("maxMessageSizeDenied", out)
        self.assertIn("Vhost User Stats", out)
        self.assertIn("remote hosts", out)

    def test_vhoststats_csv(self):
        out = self.run_qdstat(['--vhoststats', '--csv'])
        self.assertIn("Vhost Stats", out)
        self.assertIn("maxMessageSizeDenied", out)
        self.assertIn("Vhost User Stats", out)
        self.assertIn("remote hosts", out)
class QdstatLinkPriorityTest(system_test.TestCase):
"""Need 2 routers to get inter-router links for the link priority test"""
    @classmethod
    def setUpClass(cls):
        # Two interior routers: R1 connects out to R2's inter-router
        # listener, giving us inter-router links to inspect.
        super(QdstatLinkPriorityTest, cls).setUpClass()
        cls.inter_router_port = cls.tester.get_port()
        config_1 = system_test.Qdrouterd.Config([
            ('router', {'mode': 'interior', 'id': 'R1'}),
            ('listener', {'port': cls.tester.get_port()}),
            ('connector', {'role': 'inter-router', 'port': cls.inter_router_port})
        ])
        config_2 = system_test.Qdrouterd.Config([
            ('router', {'mode': 'interior', 'id': 'R2'}),
            ('listener', {'role': 'inter-router', 'port': cls.inter_router_port}),
        ])
        # Start the listening router first so R1's connector can attach.
        cls.router_2 = cls.tester.qdrouterd('test_router_2', config_2, wait=True)
        cls.router_1 = cls.tester.qdrouterd('test_router_1', config_1, wait=True)
        cls.router_1.wait_router_connected('R2')
def address(self):
return self.router_1.addresses[0]
def run_qdstat(self, args):
p = self.popen(
['qdstat', '--bus', str(self.address()), '--timeout', str(system_test.TIMEOUT)] + args,
name='qdstat-' + self.id(), stdout=PIPE, expect=None,
universal_newlines=True)
out = p.communicate()[0]
assert p.returncode == 0, \
"qdstat exit status %s, output:\n%s" % (p.returncode, out)
return out
def test_link_priority(self):
out = self.run_qdstat(['--links'])
lines = out.split("\n")
# make sure the output contains a header line
self.assertTrue(len(lines) >= 2)
# see if the header line has the word priority in it
priorityregexp = r'pri'
priority_column = re.search(priorityregexp, lines[4]).start()
self.assertTrue(priority_column > -1)
# extract the number in the priority column of every inter-router link
priorities = {}
for i in range(6, len(lines) - 1):
if re.search(r'inter-router', lines[i]):
pri = re.findall(r'[-\d]+', lines[i][priority_column:])
# make sure the priority found is a number
self.assertTrue(len(pri) > 0, "Can not find numeric priority in '%s'" % lines[i])
self.assertTrue(pri[0] != '-') # naked hypen disallowed
priority = int(pri[0])
# make sure the priority is from 0 to 9
self.assertTrue(priority >= 0, "Priority was less than 0")
self.assertTrue(priority <= 9, "Priority was greater than 9")
# mark this priority as present
priorities[priority] = True
# make sure that all priorities are present in the list (currently 0-9)
self.assertEqual(len(priorities.keys()), 10, "Not all priorities are present")
def test_link_priority_csv(self):
HEADER_ROW = 4
TYPE_COL = 0
PRI_COL = 9
out = self.run_qdstat(['--links', '--csv'])
lines = out.split("\n")
# make sure the output contains a header line
self.assertTrue(len(lines) >= 2)
# see if the header line has the word priority in it
header_line = lines[HEADER_ROW].split(',')
self.assertTrue(header_line[PRI_COL] == '"pri"')
# extract the number in the priority column of every inter-router link
priorities = {}
for i in range(HEADER_ROW + 1, len(lines) - 1):
line = lines[i].split(',')
if line[TYPE_COL] == '"inter-router"':
pri = line[PRI_COL][1:-1]
# make sure the priority found is a number
self.assertTrue(len(pri) > 0, "Can not find numeric priority in '%s'" % lines[i])
self.assertTrue(pri != '-') # naked hypen disallowed
priority = int(pri)
# make sure the priority is from 0 to 9
self.assertTrue(priority >= 0, "Priority was less than 0")
self.assertTrue(priority <= 9, "Priority was greater than 9")
# mark this priority as present
priorities[priority] = True
# make sure that all priorities are present in the list (currently 0-9)
self.assertEqual(len(priorities.keys()), 10, "Not all priorities are present")
def _test_links_all_routers(self, command):
out = self.run_qdstat(command)
self.assertTrue(out.count('UTC') == 1)
self.assertTrue(out.count('Router Links') == 2)
self.assertTrue(out.count('inter-router') == 40)
self.assertTrue(out.count('router-control') == 4)
def test_links_all_routers(self):
self._test_links_all_routers(['--links', '--all-routers'])
def test_links_all_routers_csv(self):
self._test_links_all_routers(['--links', '--all-routers', '--csv'])
def _test_all_entities(self, command):
out = self.run_qdstat(command)
self.assertTrue(out.count('UTC') == 1)
self.assertTrue(out.count('Router Links') == 1)
self.assertTrue(out.count('Router Addresses') == 1)
self.assertTrue(out.count('Connections') == 6)
self.assertTrue(out.count('AutoLinks') == 2)
self.assertTrue(out.count('Link Routes') == 3)
self.assertTrue(out.count('Router Statistics') == 1)
self.assertTrue(out.count('Memory Pools') == 1)
def test_all_entities(self):
self._test_all_entities(['--all-entities'])
def test_all_entities_csv(self):
self._test_all_entities(['--all-entities', '--csv'])
def _test_all_entities_all_routers(self, command):
out = self.run_qdstat(command)
self.assertTrue(out.count('UTC') == 1)
self.assertTrue(out.count('Router Links') == 2)
self.assertTrue(out.count('Router Addresses') == 2)
self.assertTrue(out.count('Connections') == 12)
self.assertTrue(out.count('AutoLinks') == 4)
self.assertTrue(out.count('Link Routes') == 6)
self.assertTrue(out.count('Router Statistics') == 2)
self.assertTrue(out.count('Memory Pools') == 2)
def test_all_entities_all_routers(self):
self._test_all_entities_all_routers(['--all-entities', '--all-routers'])
def test_all_entities_all_routers_csv(self):
self._test_all_entities_all_routers(['--all-entities', '--csv', '--all-routers'])
try:
    # Probe Proton for SSL support: constructing a client-mode SSLDomain
    # raises SSLUnavailable when proton was built without SSL.
    SSLDomain(SSLDomain.MODE_CLIENT)

    class QdstatSslTest(system_test.TestCase):
        """Test qdstat tool output"""

        @staticmethod
        def ssl_file(name):
            # Path to a certificate/key file shipped with the system tests.
            return os.path.join(system_test.DIR, 'ssl_certs', name)

        @staticmethod
        def sasl_path():
            return os.path.join(system_test.DIR, 'sasl_configs')

        @classmethod
        def setUpClass(cls):
            super(QdstatSslTest, cls).setUpClass()
            # Write SASL configuration file:
            with open('tests-mech-EXTERNAL.conf', 'w') as sasl_conf:
                sasl_conf.write("mech_list: EXTERNAL ANONYMOUS DIGEST-MD5 PLAIN\n")
            # qdrouterd configuration:
            # Listeners 0..3: plain AMQP, SSL-required, SSL-optional, and SSL
            # with peer authentication (EXTERNAL).  ssl_test() maps them to
            # the names 'none', 'strict', 'unsecured' and 'auth'.
            config = system_test.Qdrouterd.Config([
                ('router', {'id': 'QDR.B',
                            'saslConfigPath': os.getcwd(),
                            'workerThreads': 1,
                            'saslConfigName': 'tests-mech-EXTERNAL'}),
                ('sslProfile', {'name': 'server-ssl',
                                'caCertFile': cls.ssl_file('ca-certificate.pem'),
                                'certFile': cls.ssl_file('server-certificate.pem'),
                                'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                                'password': 'server-password'}),
                ('listener', {'port': cls.tester.get_port()}),
                ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'no', 'requireSsl': 'yes'}),
                ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'no', 'requireSsl': 'no'}),
                ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'yes', 'requireSsl': 'yes',
                              'saslMechanisms': 'EXTERNAL'})
            ])
            cls.router = cls.tester.qdrouterd('test-router', config)

        def run_qdstat(self, args, regexp=None, address=None):
            """Run qdstat (peer-name verification disabled), assert exit 0,
            optionally assert *regexp* matches the output, return the output."""
            p = self.popen(
                ['qdstat', '--bus', str(address or self.router.addresses[0]), '--ssl-disable-peer-name-verify',
                 '--timeout', str(system_test.TIMEOUT)] + args,
                name='qdstat-' + self.id(), stdout=PIPE, expect=None,
                universal_newlines=True)
            out = p.communicate()[0]
            assert p.returncode == 0, \
                "qdstat exit status %s, output:\n%s" % (p.returncode, out)
            if regexp:
                assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out)
            return out

        def get_ssl_args(self):
            # Named command-line fragments combined by ssl_test().  Subclasses
            # override this to vary how the client key password is supplied.
            args = dict(
                sasl_external=['--sasl-mechanisms', 'EXTERNAL'],
                trustfile=['--ssl-trustfile', self.ssl_file('ca-certificate.pem')],
                bad_trustfile=['--ssl-trustfile', self.ssl_file('bad-ca-certificate.pem')],
                client_cert=['--ssl-certificate', self.ssl_file('client-certificate.pem')],
                client_key=['--ssl-key', self.ssl_file('client-private-key.pem')],
                client_pass=['--ssl-password', 'client-password'])
            args['client_cert_all'] = args['client_cert'] + args['client_key'] + args['client_pass']
            return args

        def ssl_test(self, url_name, arg_names):
            """Run simple SSL connection test with supplied parameters.
            See test_ssl_* below.
            """
            args = self.get_ssl_args()
            addrs = [self.router.addresses[i] for i in range(4)]
            urls = dict(zip(['none', 'strict', 'unsecured', 'auth'], addrs))
            # The *_s variants are the same listeners addressed with an
            # explicit amqps:// scheme.
            urls.update(zip(['none_s', 'strict_s', 'unsecured_s', 'auth_s'],
                            (Url(a, scheme="amqps") for a in addrs)))

            self.run_qdstat(['--general'] + sum([args[n] for n in arg_names], []),
                            regexp=r'(?s)Router Statistics.*Mode\s*Standalone',
                            address=str(urls[url_name]))

        def ssl_test_bad(self, url_name, arg_names):
            # The connection (and hence run_qdstat's assert) must fail.
            self.assertRaises(AssertionError, self.ssl_test, url_name, arg_names)

        # Non-SSL enabled listener should fail SSL connections.
        def test_ssl_none(self):
            self.ssl_test('none', [])

        def test_ssl_scheme_to_none(self):
            self.ssl_test_bad('none_s', [])

        def test_ssl_cert_to_none(self):
            self.ssl_test_bad('none', ['client_cert'])

        # Strict SSL listener, SSL only
        def test_ssl_none_to_strict(self):
            self.ssl_test_bad('strict', [])

        def test_ssl_schema_to_strict(self):
            self.ssl_test('strict_s', [])

        def test_ssl_cert_to_strict(self):
            self.ssl_test('strict_s', ['client_cert_all'])

        def test_ssl_trustfile_to_strict(self):
            self.ssl_test('strict_s', ['trustfile'])

        def test_ssl_trustfile_cert_to_strict(self):
            self.ssl_test('strict_s', ['trustfile', 'client_cert_all'])

        def test_ssl_bad_trustfile_to_strict(self):
            self.ssl_test_bad('strict_s', ['bad_trustfile'])

        # Require-auth SSL listener
        def test_ssl_none_to_auth(self):
            self.ssl_test_bad('auth', [])

        def test_ssl_schema_to_auth(self):
            self.ssl_test_bad('auth_s', [])

        def test_ssl_trustfile_to_auth(self):
            self.ssl_test_bad('auth_s', ['trustfile'])

        def test_ssl_cert_to_auth(self):
            self.ssl_test('auth_s', ['client_cert_all'])

        def test_ssl_trustfile_cert_to_auth(self):
            self.ssl_test('auth_s', ['trustfile', 'client_cert_all'])

        def test_ssl_bad_trustfile_to_auth(self):
            self.ssl_test_bad('auth_s', ['bad_trustfile', 'client_cert_all'])

        def test_ssl_cert_explicit_external_to_auth(self):
            self.ssl_test('auth_s', ['sasl_external', 'client_cert_all'])

        # Unsecured SSL listener, allows non-SSL
        def test_ssl_none_to_unsecured(self):
            self.ssl_test('unsecured', [])

        def test_ssl_schema_to_unsecured(self):
            self.ssl_test('unsecured_s', [])

        def test_ssl_cert_to_unsecured(self):
            self.ssl_test('unsecured_s', ['client_cert_all'])

        def test_ssl_trustfile_to_unsecured(self):
            self.ssl_test('unsecured_s', ['trustfile'])

        def test_ssl_trustfile_cert_to_unsecured(self):
            self.ssl_test('unsecured_s', ['trustfile', 'client_cert_all'])

        def test_ssl_bad_trustfile_to_unsecured(self):
            self.ssl_test_bad('unsecured_s', ['bad_trustfile'])

except SSLUnavailable:
    # Proton has no SSL support: replace the suite with a single skip.
    class QdstatSslTest(system_test.TestCase):
        def test_skip(self):
            self.skipTest("Proton SSL support unavailable.")
try:
    # Probe Proton for SSL support (raises SSLUnavailable when absent).
    SSLDomain(SSLDomain.MODE_CLIENT)

    class QdstatSslTestSslPasswordFile(QdstatSslTest):
        """
        Tests the --ssl-password-file command line parameter
        """

        def get_ssl_args(self):
            # Same fragments as the base class, except the client key
            # password is read from a file rather than given on the
            # command line.
            args = dict(
                sasl_external=['--sasl-mechanisms', 'EXTERNAL'],
                trustfile=['--ssl-trustfile', self.ssl_file('ca-certificate.pem')],
                bad_trustfile=['--ssl-trustfile', self.ssl_file('bad-ca-certificate.pem')],
                client_cert=['--ssl-certificate', self.ssl_file('client-certificate.pem')],
                client_key=['--ssl-key', self.ssl_file('client-private-key.pem')],
                client_pass=['--ssl-password-file', self.ssl_file('client-password-file.txt')])
            args['client_cert_all'] = args['client_cert'] + args['client_key'] + args['client_pass']
            return args

except SSLUnavailable:
    # Bug fix: this branch previously redefined QdstatSslTest, clobbering
    # the skip stub defined above and leaving QdstatSslTestSslPasswordFile
    # undefined when Proton lacks SSL support.
    class QdstatSslTestSslPasswordFile(system_test.TestCase):
        def test_skip(self):
            self.skipTest("Proton SSL support unavailable.")
try:
    # Probe Proton for SSL support (raises SSLUnavailable when absent).
    SSLDomain(SSLDomain.MODE_CLIENT)

    class QdstatSslNoExternalTest(system_test.TestCase):
        """Test qdstat can't connect without sasl_mech EXTERNAL"""

        @staticmethod
        def ssl_file(name):
            # Path to a certificate/key file shipped with the system tests.
            return os.path.join(system_test.DIR, 'ssl_certs', name)

        @staticmethod
        def sasl_path():
            return os.path.join(system_test.DIR, 'sasl_configs')

        @classmethod
        def setUpClass(cls):
            super(QdstatSslNoExternalTest, cls).setUpClass()
            # Write SASL configuration file (EXTERNAL deliberately absent):
            with open('tests-mech-NOEXTERNAL.conf', 'w') as sasl_conf:
                sasl_conf.write("mech_list: ANONYMOUS DIGEST-MD5 PLAIN\n")
            # qdrouterd configuration: listeners 0..3 are plain, require-SSL,
            # optional-SSL, and SSL with peer authentication (EXTERNAL).
            config = system_test.Qdrouterd.Config([
                ('router', {'id': 'QDR.C',
                            'saslConfigPath': os.getcwd(),
                            'workerThreads': 1,
                            'saslConfigName': 'tests-mech-NOEXTERNAL'}),
                ('sslProfile', {'name': 'server-ssl',
                                'caCertFile': cls.ssl_file('ca-certificate.pem'),
                                'certFile': cls.ssl_file('server-certificate.pem'),
                                'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                                'password': 'server-password'}),
                ('listener', {'port': cls.tester.get_port()}),
                ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'no', 'requireSsl': 'yes'}),
                ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'no', 'requireSsl': 'no'}),
                ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'yes', 'requireSsl': 'yes',
                              'saslMechanisms': 'EXTERNAL'})
            ])
            cls.router = cls.tester.qdrouterd('test-router', config)

        def run_qdstat(self, args, regexp=None, address=None):
            """Run qdstat, assert exit 0, optionally assert *regexp* matches,
            and return the output."""
            p = self.popen(
                ['qdstat', '--bus', str(address or self.router.addresses[0]), '--timeout', str(system_test.TIMEOUT)] + args,
                name='qdstat-' + self.id(), stdout=PIPE, expect=None,
                universal_newlines=True)
            out = p.communicate()[0]
            assert p.returncode == 0, \
                "qdstat exit status %s, output:\n%s" % (p.returncode, out)
            if regexp:
                assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out)
            return out

        def ssl_test(self, url_name, arg_names):
            """Run simple SSL connection test with supplied parameters.
            See test_ssl_* below.
            """
            args = dict(
                trustfile=['--ssl-trustfile', self.ssl_file('ca-certificate.pem')],
                bad_trustfile=['--ssl-trustfile', self.ssl_file('bad-ca-certificate.pem')],
                client_cert=['--ssl-certificate', self.ssl_file('client-certificate.pem')],
                client_key=['--ssl-key', self.ssl_file('client-private-key.pem')],
                client_pass=['--ssl-password', 'client-password'])
            args['client_cert_all'] = args['client_cert'] + args['client_key'] + args['client_pass']

            addrs = [self.router.addresses[i] for i in range(4)]
            urls = dict(zip(['none', 'strict', 'unsecured', 'auth'], addrs))
            urls.update(zip(['none_s', 'strict_s', 'unsecured_s', 'auth_s'],
                            (Url(a, scheme="amqps") for a in addrs)))

            self.run_qdstat(['--general'] + sum([args[n] for n in arg_names], []),
                            regexp=r'(?s)Router Statistics.*Mode\s*Standalone',
                            address=str(urls[url_name]))

        def ssl_test_bad(self, url_name, arg_names):
            self.assertRaises(AssertionError, self.ssl_test, url_name, arg_names)

        @SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
        def test_ssl_cert_to_auth_fail_no_sasl_external(self):
            self.ssl_test_bad('auth_s', ['client_cert_all'])

        def test_ssl_trustfile_cert_to_auth_fail_no_sasl_external(self):
            self.ssl_test_bad('auth_s', ['trustfile', 'client_cert_all'])

except SSLUnavailable:
    # Bug fix: this branch previously redefined QdstatSslTest instead of
    # stubbing QdstatSslNoExternalTest, leaving the latter undefined when
    # Proton lacks SSL support.
    class QdstatSslNoExternalTest(system_test.TestCase):
        def test_skip(self):
            self.skipTest("Proton SSL support unavailable.")
if __name__ == '__main__':
    # Run the tests in this file when executed directly.
    unittest.main(main_module())
| apache-2.0 |
mcepl/youtube-dl | youtube_dl/extractor/sapo.py | 178 | 4498 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
)
class SapoIE(InfoExtractor):
    """Extractor for SAPO Vídeos (videos.sapo.pt and country variants)."""
    IE_DESC = 'SAPO Vídeos'
    # 20-character alphanumeric video ids; v2/www subdomains and several
    # country TLDs are accepted.
    _VALID_URL = r'https?://(?:(?:v2|www)\.)?videos\.sapo\.(?:pt|cv|ao|mz|tl)/(?P<id>[\da-zA-Z]{20})'

    _TESTS = [
        {
            'url': 'http://videos.sapo.pt/UBz95kOtiWYUMTA5Ghfi',
            'md5': '79ee523f6ecb9233ac25075dee0eda83',
            'note': 'SD video',
            'info_dict': {
                'id': 'UBz95kOtiWYUMTA5Ghfi',
                'ext': 'mp4',
                'title': 'Benfica - Marcas na Hitória',
                'description': 'md5:c9082000a128c3fd57bf0299e1367f22',
                'duration': 264,
                'uploader': 'tiago_1988',
                'upload_date': '20080229',
                'categories': ['benfica', 'cabral', 'desporto', 'futebol', 'geovanni', 'hooijdonk', 'joao', 'karel', 'lisboa', 'miccoli'],
            },
        },
        {
            'url': 'http://videos.sapo.pt/IyusNAZ791ZdoCY5H5IF',
            'md5': '90a2f283cfb49193fe06e861613a72aa',
            'note': 'HD video',
            'info_dict': {
                'id': 'IyusNAZ791ZdoCY5H5IF',
                'ext': 'mp4',
                'title': 'Codebits VII - Report',
                'description': 'md5:6448d6fd81ce86feac05321f354dbdc8',
                'duration': 144,
                'uploader': 'codebits',
                'upload_date': '20140427',
                'categories': ['codebits', 'codebits2014'],
            },
        },
        {
            'url': 'http://v2.videos.sapo.pt/yLqjzPtbTimsn2wWBKHz',
            'md5': 'e5aa7cc0bdc6db9b33df1a48e49a15ac',
            'note': 'v2 video',
            'info_dict': {
                'id': 'yLqjzPtbTimsn2wWBKHz',
                'ext': 'mp4',
                'title': 'Hipnose Condicionativa 4',
                'description': 'md5:ef0481abf8fb4ae6f525088a6dadbc40',
                'duration': 692,
                'uploader': 'sapozen',
                'upload_date': '20090609',
                'categories': ['condicionativa', 'heloisa', 'hipnose', 'miranda', 'sapo', 'zen'],
            },
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # All metadata comes from the per-video RSS feed.
        item = self._download_xml(
            'http://rd3.videos.sapo.pt/%s/rss2' % video_id, video_id).find('./channel/item')

        title = item.find('./title').text
        description = item.find('./{http://videos.sapo.pt/mrss/}synopse').text
        # NOTE(review): the thumbnail uses the Yahoo MRSS namespace while
        # every other field uses the sapo.pt one -- presumably matches the
        # feed as served; confirm against a live feed.
        thumbnail = item.find('./{http://search.yahoo.com/mrss/}content').get('url')
        duration = parse_duration(item.find('./{http://videos.sapo.pt/mrss/}time').text)
        uploader = item.find('./{http://videos.sapo.pt/mrss/}author').text
        upload_date = unified_strdate(item.find('./pubDate').text)
        view_count = int(item.find('./{http://videos.sapo.pt/mrss/}views').text)
        comment_count = int(item.find('./{http://videos.sapo.pt/mrss/}comment_count').text)
        tags = item.find('./{http://videos.sapo.pt/mrss/}tags').text
        categories = tags.split() if tags else []
        # m18 flags adult-only content.
        age_limit = 18 if item.find('./{http://videos.sapo.pt/mrss/}m18').text == 'true' else 0

        video_url = item.find('./{http://videos.sapo.pt/mrss/}videoFile').text
        video_size = item.find('./{http://videos.sapo.pt/mrss/}videoSize').text.split('x')

        formats = [{
            'url': video_url,
            'ext': 'mp4',
            'format_id': 'sd',
            'width': int(video_size[0]),
            'height': int(video_size[1]),
        }]

        if item.find('./{http://videos.sapo.pt/mrss/}HD').text == 'true':
            # The HD variant lives at the same URL with /mov/1 -> /mov/39.
            formats.append({
                'url': re.sub(r'/mov/1$', '/mov/39', video_url),
                'ext': 'mp4',
                'format_id': 'hd',
                'width': 1280,
                'height': 720,
            })

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'uploader': uploader,
            'upload_date': upload_date,
            'view_count': view_count,
            'comment_count': comment_count,
            'categories': categories,
            'age_limit': age_limit,
            'formats': formats,
        }
| unlicense |
AMOboxTV/AMOBox.LegoBuild | script.module.youtube.dl/lib/youtube_dl/extractor/esri.py | 35 | 2627 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
parse_filesize,
unified_strdate,
)
class EsriVideoIE(InfoExtractor):
    """Extractor for video.esri.com watch pages."""
    _VALID_URL = r'https?://video\.esri\.com/watch/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://video.esri.com/watch/1124/arcgis-online-_dash_-developing-applications',
        'md5': 'd4aaf1408b221f1b38227a9bbaeb95bc',
        'info_dict': {
            'id': '1124',
            'ext': 'mp4',
            'title': 'ArcGIS Online - Developing Applications',
            'description': 'Jeremy Bartley demonstrates how to develop applications with ArcGIS Online.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 185,
            'upload_date': '20120419',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # The page lists each resolution as "<li><strong>WxH:</strong> ..."
        # with one download link (url, extension, file size) per format.
        formats = []
        for width, height, content in re.findall(
                r'(?s)<li><strong>(\d+)x(\d+):</strong>(.+?)</li>', webpage):
            for video_url, ext, filesize in re.findall(
                    r'<a[^>]+href="([^"]+)">([^<]+) \(([^<]+)\)</a>', content):
                formats.append({
                    'url': compat_urlparse.urljoin(url, video_url),
                    'ext': ext.lower(),
                    'format_id': '%s-%s' % (ext.lower(), height),
                    'width': int(width),
                    'height': int(height),
                    'filesize_approx': parse_filesize(filesize),
                })
        self._sort_formats(formats)

        title = self._html_search_meta('title', webpage, 'title')
        description = self._html_search_meta(
            'description', webpage, 'description', fatal=False)

        thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail', fatal=False)
        if thumbnail:
            # Swap the small/thumb suffix (_s/_t) for the full-size _x file.
            thumbnail = re.sub(r'_[st]\.jpg$', '_x.jpg', thumbnail)

        duration = int_or_none(self._search_regex(
            [r'var\s+videoSeconds\s*=\s*(\d+)', r"'duration'\s*:\s*(\d+)"],
            webpage, 'duration', fatal=False))

        upload_date = unified_strdate(self._html_search_meta(
            'last-modified', webpage, 'upload date', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'formats': formats
        }
| gpl-2.0 |
timesqueezer/mdfork | mooddiary/__init__.py | 1 | 1499 | import os
from datetime import timedelta
from flask import Flask
from flask.ext.assets import Bundle
from mooddiary.core import db, migrate, assets, jwt
from mooddiary.bundles import js, css
from mooddiary.views import main
from mooddiary.api import api
from mooddiary.auth import auth
from mooddiary.models import Entry, EntryField, EntryFieldAnswer, User
def create_app(config=None):
    """Application factory for the mooddiary Flask app.

    :param config: optional configuration object; applied after the defaults
        below, so its attributes take precedence.
    :return: the configured Flask application.
    """
    app = Flask(__name__)

    # SECURITY NOTE(review): the secret key, database URI and reCAPTCHA keys
    # are hard-coded in source; they should come from the environment or a
    # deployment config instead of being committed.
    app.config['SECRET_KEY'] = '129iv3n91283nv9812n3v89q2nnv9iaszv978n98qwe7z897d'
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres@localhost/mooddiaryDb'
    #app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/mooddiary.db'
    app.config['DEBUG'] = True
    app.config['ASSETS_DEBUG'] = True
    # lessc binary comes from the project's npm dependencies.
    app.config['LESS_BIN'] = os.path.realpath(os.path.join(os.path.dirname(__file__), '../node_modules/less/bin/lessc'))
    app.config['JWT_EXPIRATION_DELTA'] = timedelta(days=28)
    app.config['JWT_ALGORITHM'] = 'HS512'
    app.config['RECAPTCHA_SECRET_KEY'] = '6LdSswwTAAAAADs20eK6NqYaeppxIWm-gJrpto0l'
    app.config['RECAPTCHA_PUBLIC_KEY'] = '6LdSswwTAAAAABTZq5Za_0blmaSpcg-dFcqaGda9'

    if config:
        app.config.from_object(config)

    # Bind the extensions to this application instance.
    db.init_app(app)
    migrate.init_app(app, db)
    assets.init_app(app)
    jwt.init_app(app)

    assets.register('js', js)
    assets.register('css', css)

    app.register_blueprint(main)
    app.register_blueprint(api, url_prefix='/api')
    app.register_blueprint(auth, url_prefix='/auth')

    return app
| mit |
pgmillon/ansible | lib/ansible/modules/notification/matrix.py | 47 | 3854 | #!/usr/bin/python
# coding: utf-8
# (c) 2018, Jan Christian Grünhage <jan.christian@gruenhage.xyz>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
author: "Jan Christian Grünhage (@jcgruenhage)"
module: matrix
short_description: Send notifications to matrix
description:
- This module sends html formatted notifications to matrix rooms.
version_added: "2.8"
options:
msg_plain:
description:
- Plain text form of the message to send to matrix, usually markdown
required: true
msg_html:
description:
- HTML form of the message to send to matrix
required: true
room_id:
description:
- ID of the room to send the notification to
required: true
hs_url:
description:
- URL of the homeserver, where the CS-API is reachable
required: true
token:
description:
- Authentication token for the API call. If provided, user_id and password are not required
user_id:
description:
- The user id of the user
password:
description:
- The password to log in with
requirements:
- matrix-client (Python library)
'''
EXAMPLES = '''
- name: Send matrix notification with token
matrix:
msg_plain: "**hello world**"
msg_html: "<b>hello world</b>"
room_id: "!12345678:server.tld"
hs_url: "https://matrix.org"
token: "{{ matrix_auth_token }}"
- name: Send matrix notification with user_id and password
matrix:
msg_plain: "**hello world**"
msg_html: "<b>hello world</b>"
room_id: "!12345678:server.tld"
hs_url: "https://matrix.org"
user_id: "ansible_notification_bot"
password: "{{ matrix_auth_password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
MATRIX_IMP_ERR = None
try:
from matrix_client.client import MatrixClient
except ImportError:
MATRIX_IMP_ERR = traceback.format_exc()
matrix_found = False
else:
matrix_found = True
def run_module():
    """Entry point for the matrix notification module.

    Validates arguments, logs in to the homeserver (either with a token or
    with user_id/password), joins the target room, posts the message in both
    plain and HTML form, then exits via AnsibleModule.exit_json().
    """
    module_args = dict(
        msg_plain=dict(type='str', required=True),
        msg_html=dict(type='str', required=True),
        room_id=dict(type='str', required=True),
        hs_url=dict(type='str', required=True),
        token=dict(type='str', required=False, no_log=True),
        user_id=dict(type='str', required=False),
        password=dict(type='str', required=False, no_log=True),
    )

    result = dict(
        changed=False,
        message=''
    )

    module = AnsibleModule(
        argument_spec=module_args,
        mutually_exclusive=[['password', 'token']],
        required_one_of=[['password', 'token']],
        required_together=[['user_id', 'password']],
        supports_check_mode=True
    )

    if not matrix_found:
        module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)

    if module.check_mode:
        # Bug fix: a bare "return result" left Ansible with no JSON response
        # in check mode; exit_json() emits the result and exits cleanly.
        module.exit_json(**result)

    # create a client object
    client = MatrixClient(module.params['hs_url'])
    if module.params['token'] is not None:
        client.api.token = module.params['token']
    else:
        client.login(module.params['user_id'], module.params['password'], sync=False)

    # make sure we are in a given room and return a room object for it
    room = client.join_room(module.params['room_id'])
    # send an html formatted messages
    room.send_html(module.params['msg_html'], module.params['msg_plain'])

    module.exit_json(**result)
def main():
    """Module executable entry point; delegates to run_module()."""
    run_module()
if __name__ == '__main__':
    # Allow direct execution (e.g. for local debugging of the module).
    main()
| gpl-3.0 |
gnarula/eden_deployment | modules/unit_tests/s3db/project.py | 6 | 2451 | # -*- coding: utf-8 -*-
#
# Project Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3db/project.py
#
import unittest
from gluon import *
from gluon.storage import Storage
from gluon.dal import Row
from eden.project import S3ProjectActivityModel
# =============================================================================
class ProjectTests(unittest.TestCase):
    """ Project Module Tests """

    def setUp(self):
        """ Set up organisation records """

        # auth = current.auth
        s3db = current.s3db

        # NOTE(review): "auth" is never assigned in this method (the line
        # above is commented out) -- this relies on web2py injecting "auth"
        # as a global in the test environment; confirm before running
        # standalone.
        auth.override = True

        ptable = s3db.project_project
        atable = s3db.project_activity

        # One project with a code, one activity linked to it...
        p1 = Row(name="Test Project 1", code="TP1")
        p1_id = ptable.insert(**p1)
        p1.update(id=p1_id)

        a1 = Row(name="Test Activity 1", project_id=p1_id)
        a1_id = atable.insert(**a1)
        a1.update(id=a1_id)

        # activity without a project
        a2 = Row(name="Test Activity 2")
        a2_id = atable.insert(**a2)
        a2.update(id=a2_id)

        self.p1 = p1
        self.a1 = a1
        self.a2 = a2

    def testActivityRepresent(self):
        # project_activity_represent should render "<code> - <name>" when the
        # activity has a project, the bare name otherwise, and messages.NONE
        # for a None id.  It accepts an id, a row, or both.
        rep = S3ProjectActivityModel.project_activity_represent

        self.assertEqual(
            rep(self.a1.id),
            "%s - %s" % (self.p1.code, self.a1.name),
        )
        self.assertEqual(
            rep(self.a2.id),
            "%s" % self.a2.name,
        )
        self.assertEqual(
            rep(None),
            current.messages.NONE,
        )
        self.assertEqual(
            rep(self.a1),
            "%s - %s" % (self.p1.code, self.a1.name),
        )
        self.assertEqual(
            rep(self.a1.id, self.a1),
            "%s - %s" % (self.p1.code, self.a1.name),
        )

    def tearDown(self):
        # NOTE(review): "db" and "auth" are web2py environment globals, not
        # imported in this module -- verify they are available.
        db.rollback()
        auth.override = False
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner().run(suite)
return
if __name__ == "__main__":
    # Allow running this test module directly (outside the web2py runner).
    run_suite(
        ProjectTests,
    )

# END ========================================================================
| mit |
adaxi/couchpotato | libs/suds/mx/encoded.py | 211 | 4651 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides encoded I{marshaller} classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.literal import Literal
from suds.mx.typer import Typer
from suds.sudsobject import Factory, Object
from suds.xsd.query import TypeQuery
log = getLogger(__name__)
#
# Add encoded extensions:
#   aty = the SOAP (section 5) encoded array type, carried on Content so the
#   marshaller can emit soapenc:arrayType attributes (see Encoded.end()).
#
Content.extensions.append('aty')
class Encoded(Literal):
    """
    A SOAP section (5) encoding marshaller.
    This marshaller supports rpc/encoded soap styles.
    """

    def start(self, content):
        #
        # For soap encoded arrays, the 'aty' (array type) information
        # is extracted and added to the 'content'. Then, the content.value
        # is replaced with an object containing an 'item=[]' attribute
        # containing values that are 'typed' suds objects.
        #
        start = Literal.start(self, content)
        if start and isinstance(content.value, (list, tuple)):
            resolved = content.type.resolve()
            for c in resolved:
                if hasattr(c[0], 'aty'):
                    content.aty = (content.tag, c[0].aty)
                    self.cast(content)
                    break
        return start

    def end(self, parent, content):
        #
        # For soap encoded arrays, the soapenc:arrayType attribute is
        # added with proper type and size information.
        # Eg: soapenc:arrayType="xs:int[3]"
        #
        Literal.end(self, parent, content)
        if content.aty is None:
            return
        tag, aty = content.aty
        ns0 = ('at0', aty[1])
        ns1 = ('at1', 'http://schemas.xmlsoap.org/soap/encoding/')
        array = content.value.item
        child = parent.getChild(tag)
        child.addPrefix(ns0[0], ns0[1])
        child.addPrefix(ns1[0], ns1[1])
        name = '%s:arrayType' % ns1[0]
        value = '%s:%s[%d]' % (ns0[0], aty[0], len(array))
        child.set(name, value)

    def encode(self, node, content):
        # 'any' typed content is typed automatically from the python value;
        # otherwise an explicit xsi:type is written (qualified when xstq).
        if content.type.any():
            Typer.auto(node, content.value)
            return
        if content.real.any():
            Typer.auto(node, content.value)
            return
        ns = None
        name = content.real.name
        if self.xstq:
            ns = content.real.namespace()
        Typer.manual(node, name, ns)

    def cast(self, content):
        """
        Cast the I{untyped} list items found in content I{value}.
        Each items contained in the list is checked for XSD type information.
        Items (values) that are I{untyped}, are replaced with suds objects and
        type I{metadata} is added.
        @param content: The content holding the collection.
        @type content: L{Content}
        @return: self
        @rtype: L{Encoded}
        """
        aty = content.aty[1]
        resolved = content.type.resolve()
        array = Factory.object(resolved.name)
        array.item = []
        query = TypeQuery(aty)
        ref = query.execute(self.schema)
        if ref is None:
            # Bug fix: previously raised TypeNotFound(qref) where 'qref' is
            # undefined in this scope, producing a NameError instead of the
            # intended error; report the queried array type instead.
            raise TypeNotFound(aty)
        for x in content.value:
            if isinstance(x, (list, tuple)):
                # Nested sequences are appended untouched.
                array.item.append(x)
                continue
            if isinstance(x, Object):
                md = x.__metadata__
                md.sxtype = ref
                array.item.append(x)
                continue
            if isinstance(x, dict):
                x = Factory.object(ref.name, x)
                md = x.__metadata__
                md.sxtype = ref
                array.item.append(x)
                continue
            x = Factory.property(ref.name, x)
            md = x.__metadata__
            md.sxtype = ref
            array.item.append(x)
        content.value = array
        return self
| gpl-3.0 |
ShashaQin/erpnext | erpnext/stock/stock_balance.py | 6 | 9389 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, nowdate, nowtime
from erpnext.stock.utils import update_bin
from erpnext.stock.stock_ledger import update_entries_after
def repost(only_actual=False, allow_negative_stock=False, allow_zero_rate=False, only_bin=False):
    """
    Repost everything!
    """
    # Commit after every item so a crash mid-way keeps earlier work.
    frappe.db.auto_commit_on_many_writes = 1

    if allow_negative_stock:
        # Temporarily force-allow negative stock; the original setting is
        # restored after the loop.
        existing_allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock")
        frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)

    # Every (item, warehouse) pair that appears in either Bin or the ledger.
    for d in frappe.db.sql("""select distinct item_code, warehouse from
        (select item_code, warehouse from tabBin
        union
        select item_code, warehouse from `tabStock Ledger Entry`) a"""):
        try:
            repost_stock(d[0], d[1], allow_zero_rate, only_actual, only_bin)
            frappe.db.commit()
        except:
            # Best-effort: skip the failing pair and continue with the rest.
            frappe.db.rollback()

    if allow_negative_stock:
        frappe.db.set_value("Stock Settings", None, "allow_negative_stock", existing_allow_negative_stock)
    frappe.db.auto_commit_on_many_writes = 0
def repost_stock(item_code, warehouse, allow_zero_rate=False, only_actual=False, only_bin=False):
	"""Repost ledger entries and/or bin quantities for one item/warehouse pair."""
	if not only_bin:
		# Rebuild actual qty / valuation from the stock ledger first.
		repost_actual_qty(item_code, warehouse, allow_zero_rate)
	if not (item_code and warehouse) or only_actual:
		return
	qty_dict = {
		"reserved_qty": get_reserved_qty(item_code, warehouse),
		"indented_qty": get_indented_qty(item_code, warehouse),
		"ordered_qty": get_ordered_qty(item_code, warehouse),
		"planned_qty": get_planned_qty(item_code, warehouse),
	}
	if only_bin:
		# The ledger was not reposted above, so read actual qty straight
		# from the latest stock ledger entry instead.
		qty_dict["actual_qty"] = get_balance_qty_from_sle(item_code, warehouse)
	update_bin_qty(item_code, warehouse, qty_dict)
def repost_actual_qty(item_code, warehouse, allow_zero_rate=False):
	"""Rebuild qty/valuation in the stock ledger for one item/warehouse.

	Best effort: any exception is swallowed so a bulk repost() run can
	continue with the remaining pairs.
	"""
	try:
		update_entries_after({ "item_code": item_code, "warehouse": warehouse }, allow_zero_rate)
	except:
		# NOTE(review): intentionally silent; consider logging the failure
		# so broken pairs can be diagnosed later.
		pass
def get_balance_qty_from_sle(item_code, warehouse):
	"""Return the most recent qty_after_transaction from the stock ledger."""
	rows = frappe.db.sql("""select qty_after_transaction from `tabStock Ledger Entry`
		where item_code=%s and warehouse=%s and is_cancelled='No'
		order by posting_date desc, posting_time desc, name desc
		limit 1""", (item_code, warehouse))
	if not rows:
		return 0.0
	return flt(rows[0][0])
def get_reserved_qty(item_code, warehouse):
	"""Qty reserved against open, submitted Sales Orders.

	Combines two sources: packed items (product bundles, scaled by the parent
	Sales Order Item's undelivered fraction) and plain Sales Order Items.
	Rows delivered by the supplier (drop-ship) are excluded in both branches.
	"""
	reserved_qty = frappe.db.sql("""
		select
			sum((dnpi_qty / so_item_qty) * (so_item_qty - so_item_delivered_qty))
		from
			(
				(select
					qty as dnpi_qty,
					(
						select qty from `tabSales Order Item`
						where name = dnpi.parent_detail_docname
						and (delivered_by_supplier is null or delivered_by_supplier = 0)
					) as so_item_qty,
					(
						select delivered_qty from `tabSales Order Item`
						where name = dnpi.parent_detail_docname
						and delivered_by_supplier = 0
					) as so_item_delivered_qty,
					parent, name
				from
				(
					select qty, parent_detail_docname, parent, name
					from `tabPacked Item` dnpi_in
					where item_code = %s and warehouse = %s
					and parenttype="Sales Order"
					and item_code != parent_item
					and exists (select * from `tabSales Order` so
					where name = dnpi_in.parent and docstatus = 1 and status != 'Closed')
				) dnpi)
			union
				(select qty as dnpi_qty, qty as so_item_qty,
					delivered_qty as so_item_delivered_qty, parent, name
				from `tabSales Order Item` so_item
				where item_code = %s and warehouse = %s
				and (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0)
				and exists(select * from `tabSales Order` so
					where so.name = so_item.parent and so.docstatus = 1
					and so.status != 'Closed'))
			) tab
		where
			so_item_qty >= so_item_delivered_qty
		""", (item_code, warehouse, item_code, warehouse))
	return flt(reserved_qty[0][0]) if reserved_qty else 0
def get_indented_qty(item_code, warehouse):
	"""Qty still pending against open, submitted Material Requests."""
	rows = frappe.db.sql("""select sum(mr_item.qty - mr_item.ordered_qty)
		from `tabMaterial Request Item` mr_item, `tabMaterial Request` mr
		where mr_item.item_code=%s and mr_item.warehouse=%s
		and mr_item.qty > mr_item.ordered_qty and mr_item.parent=mr.name
		and mr.status!='Stopped' and mr.docstatus=1""", (item_code, warehouse))
	if not rows:
		return 0
	return flt(rows[0][0])
def get_ordered_qty(item_code, warehouse):
	"""Qty (in stock UOM) still expected on open, submitted Purchase Orders."""
	rows = frappe.db.sql("""
		select sum((po_item.qty - po_item.received_qty)*po_item.conversion_factor)
		from `tabPurchase Order Item` po_item, `tabPurchase Order` po
		where po_item.item_code=%s and po_item.warehouse=%s
		and po_item.qty > po_item.received_qty and po_item.parent=po.name
		and po.status not in ('Closed', 'Delivered') and po.docstatus=1
		and po_item.delivered_by_supplier = 0""", (item_code, warehouse))
	if not rows:
		return 0
	return flt(rows[0][0])
def get_planned_qty(item_code, warehouse):
	"""Qty still to be manufactured on submitted, non-stopped Production Orders."""
	rows = frappe.db.sql("""
		select sum(qty - produced_qty) from `tabProduction Order`
		where production_item = %s and fg_warehouse = %s and status != "Stopped"
		and docstatus=1 and qty > produced_qty""", (item_code, warehouse))
	if not rows:
		return 0
	return flt(rows[0][0])
def update_bin_qty(item_code, warehouse, qty_dict=None):
	"""Write changed quantity fields onto the Bin of item_code/warehouse.

	Args:
		item_code: item whose bin should be updated.
		warehouse: warehouse of the bin.
		qty_dict: mapping of bin fieldname -> new quantity. Only fields whose
			value actually differs are written; projected_qty is recomputed
			and the bin saved only if at least one field changed.
	"""
	from erpnext.stock.utils import get_bin
	# Guard the default: iterating None below would raise a TypeError.
	if not qty_dict:
		return
	bin_doc = get_bin(item_code, warehouse)  # renamed so the builtin `bin` is not shadowed
	mismatch = False
	for fld, val in qty_dict.items():
		if flt(bin_doc.get(fld)) != flt(val):
			bin_doc.set(fld, flt(val))
			mismatch = True
	if mismatch:
		bin_doc.projected_qty = flt(bin_doc.actual_qty) + flt(bin_doc.ordered_qty) + \
			flt(bin_doc.indented_qty) + flt(bin_doc.planned_qty) - flt(bin_doc.reserved_qty)
		bin_doc.save()
def set_stock_balance_as_per_serial_no(item_code=None, posting_date=None, posting_time=None,
	fiscal_year=None):
	"""Reconcile bin qty with the count of active Serial Nos.

	For every serialised item whose Bin qty disagrees with the number of
	non-cancelled Serial Nos in that warehouse, inserts a corrective
	'Stock Reconciliation (Manual)' ledger entry and reposts the ledger.
	Python 2 module (uses `print` statements).
	"""
	if not posting_date: posting_date = nowdate()
	if not posting_time: posting_time = nowtime()
	# NOTE(review): "\'" is just "'" in Python, so this replace() is a no-op;
	# a quote in item_code would break the SQL string -- confirm and use
	# proper parameter binding instead.
	condition = " and item.name='%s'" % item_code.replace("'", "\'") if item_code else ""
	bin = frappe.db.sql("""select bin.item_code, bin.warehouse, bin.actual_qty, item.stock_uom
		from `tabBin` bin, tabItem item
		where bin.item_code = item.name and item.has_serial_no = 1 %s""" % condition)
	for d in bin:
		# d = (item_code, warehouse, actual_qty, stock_uom)
		serial_nos = frappe.db.sql("""select count(name) from `tabSerial No`
			where item_code=%s and warehouse=%s and docstatus < 2""", (d[0], d[1]))
		if serial_nos and flt(serial_nos[0][0]) != flt(d[2]):
			print d[0], d[1], d[2], serial_nos[0][0]
			# Latest valuation/company for the corrective entry.
			sle = frappe.db.sql("""select valuation_rate, company from `tabStock Ledger Entry`
				where item_code = %s and warehouse = %s and ifnull(is_cancelled, 'No') = 'No'
				order by posting_date desc limit 1""", (d[0], d[1]))
			sle_dict = {
				'doctype'					: 'Stock Ledger Entry',
				'item_code'					: d[0],
				'warehouse'					: d[1],
				'transaction_date'	 		: nowdate(),
				'posting_date'				: posting_date,
				'posting_time'			 	: posting_time,
				'voucher_type'			 	: 'Stock Reconciliation (Manual)',
				'voucher_no'				: '',
				'voucher_detail_no'			: '',
				# Delta that brings the bin in line with the serial-no count.
				'actual_qty'				: flt(serial_nos[0][0]) - flt(d[2]),
				'stock_uom'					: d[3],
				# Incoming rate only matters when qty increases.
				'incoming_rate'				: sle and flt(serial_nos[0][0]) > flt(d[2]) and flt(sle[0][0]) or 0,
				'company'					: sle and cstr(sle[0][1]) or 0,
				'is_cancelled'			 	: 'No',
				'batch_no'					: '',
				'serial_no'					: ''
			}
			sle_doc = frappe.get_doc(sle_dict)
			# Skip validation/link checks: this is a raw corrective entry.
			sle_doc.flags.ignore_validate = True
			sle_doc.flags.ignore_links = True
			sle_doc.insert()
			args = sle_dict.copy()
			args.update({
				"sle_id": sle_doc.name,
				"is_amended": 'No'
			})
			update_bin(args)
			# Repost downstream ledger entries from the corrective entry on.
			update_entries_after({
				"item_code": d[0],
				"warehouse": d[1],
				"posting_date": posting_date,
				"posting_time": posting_time
			})
def reset_serial_no_status_and_warehouse(serial_nos=None):
	"""Re-sync warehouse on draft Serial Nos from their latest ledger entry."""
	if not serial_nos:
		serial_nos = frappe.db.sql_list("""select name from `tabSerial No` where docstatus = 0""")
	for name in serial_nos:
		try:
			serial_doc = frappe.get_doc("Serial No", name)
			latest_entry = serial_doc.get_last_sle()
			if flt(latest_entry.actual_qty) <= 0:
				continue
			serial_doc.warehouse = latest_entry.warehouse
			serial_doc.via_stock_ledger = True
			serial_doc.save()
		except:
			# Best effort: skip serial nos that fail to load or save.
			pass
def repost_all_stock_vouchers():
	"""Delete and regenerate SLE/GL entries for every stock voucher.

	Only vouchers touching warehouses that have a linked account are
	considered. Failed vouchers are collected in `rejected` and printed at
	the end. Python 2 module (`print` statements, `except Exception, e`).
	"""
	warehouses_with_account = frappe.db.sql_list("""select master_name from tabAccount
		where ifnull(account_type, '') = 'Warehouse'""")
	# Build one %s placeholder per warehouse for the IN (...) clause.
	vouchers = frappe.db.sql("""select distinct voucher_type, voucher_no
		from `tabStock Ledger Entry` sle
		where voucher_type != "Serial No" and sle.warehouse in (%s)
		order by posting_date, posting_time, name""" %
		', '.join(['%s']*len(warehouses_with_account)), tuple(warehouses_with_account))
	rejected = []
	i = 0
	for voucher_type, voucher_no in vouchers:
		i+=1
		print i, "/", len(vouchers)
		try:
			# Wipe existing ledger rows so the voucher can regenerate them.
			for dt in ["Stock Ledger Entry", "GL Entry"]:
				frappe.db.sql("""delete from `tab%s` where voucher_type=%s and voucher_no=%s"""%
					(dt, '%s', '%s'), (voucher_type, voucher_no))
			doc = frappe.get_doc(voucher_type, voucher_no)
			if voucher_type=="Stock Entry" and doc.purpose in ["Manufacture", "Repack"]:
				doc.calculate_rate_and_amount(force=1)
			elif voucher_type=="Purchase Receipt" and doc.is_subcontracted == "Yes":
				doc.validate()
			doc.update_stock_ledger()
			doc.make_gl_entries(repost_future_gle=False)
			frappe.db.commit()
		except Exception, e:
			print frappe.get_traceback()
			rejected.append([voucher_type, voucher_no])
			frappe.db.rollback()
	print rejected
| agpl-3.0 |
alexzoo/python | selenium_tests/env/lib/python3.6/site-packages/selenium/webdriver/phantomjs/webdriver.py | 29 | 2934 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from .service import Service
class WebDriver(RemoteWebDriver):
    """
    Wrapper to communicate with PhantomJS through Ghostdriver.

    You will need to follow all the directions here:
    https://github.com/detro/ghostdriver
    """

    def __init__(self, executable_path="phantomjs",
                 port=0, desired_capabilities=DesiredCapabilities.PHANTOMJS,
                 service_args=None, service_log_path=None):
        """
        Creates a new instance of the PhantomJS / Ghostdriver.

        Starts the Ghostdriver service, then attaches a remote driver
        session to it.

        :Args:
         - executable_path - path to the PhantomJS binary (default assumes
           it is on $PATH).
         - port - port for the service; 0 picks a free port automatically.
         - desired_capabilities - dict of non-browser-specific capabilities
           only, such as "proxy" or "loggingPref".
         - service_args - list of extra command line arguments for PhantomJS.
         - service_log_path - file the PhantomJS service should log to.
        """
        self.service = Service(executable_path,
                               port=port,
                               service_args=service_args,
                               log_path=service_log_path)
        self.service.start()
        try:
            RemoteWebDriver.__init__(self,
                                     command_executor=self.service.service_url,
                                     desired_capabilities=desired_capabilities)
        except Exception:
            # The session never came up; make sure the child process dies.
            self.quit()
            raise
        self._is_remote = False

    def quit(self):
        """
        Closes the browser and shuts down the PhantomJS executable that was
        started alongside this driver.
        """
        try:
            RemoteWebDriver.quit(self)
        except Exception:
            # The session may already be gone; stopping the service below is
            # what actually matters.
            pass
        finally:
            self.service.stop()
| apache-2.0 |
philsch/ansible | contrib/inventory/openvz.py | 196 | 2818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# openvz.py
#
# Copyright 2014 jordonr <jordon@beamsyn.net>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Inspired by libvirt_lxc.py inventory script
# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
#
# Groups are determined by the description field of openvz guests
# multiple groups can be separated by commas: webserver,dbserver
from subprocess import Popen, PIPE
import sys
import json
# OpenVZ hardware nodes to query over SSH; edit this list for your site.
vzhosts = ['vzhost1', 'vzhost2', 'vzhost3']
# Seed the inventory with the nodes themselves plus the '_meta' key, which
# lets Ansible read all hostvars from a single --list call.
inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
# Fallback group for guests whose description field is not set.
default_group = ['vzguest']
def get_guests():
    """SSH to every OpenVZ node, collect its guests and fill the inventory."""
    for host in vzhosts:
        # `vzlist -j` prints a JSON array describing every guest on the node.
        proc = Popen(['ssh', host, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True)
        guests = json.loads(proc.stdout.read())
        for guest in guests:
            hostname = guest['hostname']
            # Record per-guest facts under _meta.hostvars.
            inventory['_meta']['hostvars'][hostname] = {
                'ctid': guest['ctid'],
                'veid': guest['veid'],
                'vpsid': guest['vpsid'],
                'private_path': guest['private'],
                'root_path': guest['root'],
                'ip': guest['ip'],
            }
            # Groups come from the comma-separated description field.
            description = guest['description']
            if description is not None:
                groups = description.split(",")
            else:
                groups = default_group
            for group in groups:
                if group not in inventory:
                    inventory[group] = {'hosts': []}
                inventory[group]['hosts'].append(hostname)
    return inventory
# Ansible dynamic-inventory protocol: --list dumps everything (hostvars are
# included via _meta, so --host can return an empty dict).
args = sys.argv[1:]
if args == ['--list']:
    print(json.dumps(get_guests(), sort_keys=True))
elif len(args) == 2 and args[0] == '--host':
    print(json.dumps({}))
else:
    print("Need an argument, either --list or --host <host>")
| gpl-3.0 |
himanshu-setia/keystone | keystone/tests/unit/external/test_timeutils.py | 11 | 1232 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_utils import timeutils
import keystone.tests.unit as tests
class TestTimeUtils(tests.BaseTestCase):
    """Exercise oslo_utils.timeutils date-string parsing."""

    def test_parsing_date_strings_returns_a_datetime(self):
        date_str = '2015-09-23T04:45:37.196621Z'
        parsed = timeutils.parse_strtime(date_str, fmt=tests.TIME_FORMAT)
        self.assertIsInstance(parsed, datetime.datetime)

    def test_parsing_invalid_date_strings_raises_a_ValueError(self):
        # An empty string cannot satisfy even a minimal format.
        self.assertRaises(ValueError,
                          timeutils.parse_strtime,
                          '',
                          fmt='%Y')
| apache-2.0 |
snare/voltron | voltron/api.py | 1 | 11032 | import os
import base64
import functools
import inspect
import json
import logging
import logging
import logging.config
import select
import socket
import threading
from collections import defaultdict

import six
from scruffy.plugin import Plugin

import voltron
from .plugin import APIPlugin
# Module-level logger and the API protocol version advertised to clients.
log = logging.getLogger('api')
version = 1.1
class InvalidRequestTypeException(Exception):
    """Raised when the client is asked to send an invalid request type."""


class InvalidDebuggerHostException(Exception):
    """Raised when the debugger host is invalid."""


class InvalidViewNameException(Exception):
    """Raised when an invalid view name is specified."""


class InvalidMessageException(Exception):
    """Raised when an invalid API message is received."""


class ServerSideOnlyException(Exception):
    """Raised when a server-side method is called on a client-side instance.

    See the @server_side decorator.
    """


class ClientSideOnlyException(Exception):
    """Raised when a client-side method is called on a server-side instance.

    See the @client_side decorator.
    """


class DebuggerNotPresentException(Exception):
    """Raised when an APIRequest is dispatched without a valid debugger present."""


class NoSuchTargetException(Exception):
    """Raised when an APIRequest specifies an invalid target."""


class TargetBusyException(Exception):
    """Raised when the requested target is busy and cannot be queried."""


class MissingFieldError(Exception):
    """Raised when a validated APIMessage is missing a required field."""


class NoSuchThreadException(Exception):
    """Raised when the specified thread ID or index does not exist."""


class UnknownArchitectureException(Exception):
    """Raised when the debugger host runs on an unknown architecture."""


class BlockingNotSupportedError(Exception):
    """Raised when a view that cannot block connects to a debugger host
    without async support."""
def server_side(func):
    """
    Decorator to designate an API method applicable only to server-side
    instances.

    This allows us to use the same APIRequest and APIResponse subclasses on the
    client and server sides without too much confusion.

    Raises ServerSideOnlyException when the wrapped method is invoked on an
    instance while no debugger host is present (i.e. client side).
    """
    @functools.wraps(func)  # keep __name__/__doc__ of the wrapped method
    def inner(*args, **kwargs):
        # Only guard calls on API message instances; plain functions pass through.
        if args and hasattr(args[0], 'is_server') and not voltron.debugger:
            raise ServerSideOnlyException("This method can only be called on a server-side instance")
        return func(*args, **kwargs)
    return inner
def client_side(func):
    """
    Decorator to designate an API method applicable only to client-side
    instances.

    This allows us to use the same APIRequest and APIResponse subclasses on the
    client and server sides without too much confusion.

    Raises ClientSideOnlyException when the wrapped method is invoked on an
    instance while a debugger host is present (i.e. server side).
    """
    @functools.wraps(func)  # keep __name__/__doc__ of the wrapped method
    def inner(*args, **kwargs):
        # Only guard calls on API message instances; plain functions pass through.
        if args and hasattr(args[0], 'is_server') and voltron.debugger:
            raise ClientSideOnlyException("This method can only be called on a client-side instance")
        return func(*args, **kwargs)
    return inner
def cast_b(val):
    """Coerce *val* to a byte string; text is encoded with latin1."""
    if isinstance(val, six.text_type):
        return val.encode('latin1')
    if isinstance(val, six.binary_type):
        return val
    return six.binary_type(val)
def cast_s(val):
    """Coerce *val* to a text string; bytes are decoded with latin1.

    Uses isinstance() (consistent with cast_b) rather than exact type
    comparison, so subclasses of str/bytes are handled correctly.
    """
    if isinstance(val, six.text_type):
        return val
    elif isinstance(val, six.binary_type):
        return val.decode('latin1')
    return six.text_type(val)
class APIMessage(object):
    """
    Top-level API message class.
    """
    # Fields serialised at the top level of the JSON message.
    _top_fields = ['type']
    # Payload fields (serialised under 'data'); maps name -> required flag.
    _fields = {}
    # Payload fields that are base64-encoded for transmission.
    _encode_fields = []
    type = None
    def __init__(self, data=None, *args, **kwargs):
        # process any data that was passed in
        if data:
            self.from_json(data)
        # any other kwargs are treated as field values
        for field in kwargs:
            setattr(self, field, kwargs[field])
    def __str__(self):
        """
        Return a string (JSON) representation of the API message properties.
        """
        return self.to_json()
    def to_dict(self):
        """
        Return a transmission-safe dictionary representation of the API message properties.
        """
        d = {field: getattr(self, field) for field in self._top_fields if hasattr(self, field)}
        # set values of data fields
        d['data'] = {}
        for field in self._fields:
            if hasattr(self, field):
                # base64 encode the field for transmission if necessary
                if field in self._encode_fields:
                    val = getattr(self, field)
                    if val:
                        val = cast_s(base64.b64encode(cast_b(val)))
                    d['data'][field] = val
                else:
                    d['data'][field] = getattr(self, field)
        return d
    def from_dict(self, d):
        """
        Initialise an API message from a transmission-safe dictionary.
        """
        for key in d:
            if key == 'data':
                # Payload fields; decode any base64-encoded values.
                for dkey in d['data']:
                    if dkey in self._encode_fields:
                        setattr(self, str(dkey), base64.b64decode(d['data'][dkey]))
                    else:
                        setattr(self, str(dkey), d['data'][dkey])
            else:
                setattr(self, str(key), d[key])
    def to_json(self):
        """
        Return a JSON representation of the API message properties.
        """
        return json.dumps(self.to_dict())
    def from_json(self, data):
        """
        Initialise an API message from a JSON representation.

        Raises InvalidMessageException if the data is not valid JSON.
        """
        try:
            d = json.loads(data)
        except ValueError:
            raise InvalidMessageException()
        self.from_dict(d)
    def __getattr__(self, name):
        """
        Attribute accessor.
        If a defined field is requested that doesn't have a value set,
        return None.
        """
        # NOTE(review): the implicit fall-through also returns None for names
        # NOT in _fields, so unknown attribute access never raises
        # AttributeError -- confirm this is intended.
        if name in self._fields:
            return None
    def validate(self):
        """
        Validate the message.
        Ensure all the required fields are present and not None.

        Raises MissingFieldError naming the first missing/None field.
        """
        # Required payload fields are those whose _fields value is truthy.
        required_fields = list(filter(lambda x: self._fields[x], self._fields.keys()))
        for field in (self._top_fields + required_fields):
            # NOTE(review): `== None` should be `is None` (PEP 8 idiom).
            if not hasattr(self, field) or hasattr(self, field) and getattr(self, field) == None:
                raise MissingFieldError(field)
class APIRequest(APIMessage):
    """
    An API request object. Contains functions and accessors common to all API
    request types.
    Subclasses of APIRequest are used on both the client and server sides. On
    the server side they are instantiated by Server's `handle_request()`
    method. On the client side they are instantiated by whatever class is doing
    the requesting (probably a view class).
    """
    _top_fields = ['type', 'request', 'block', 'timeout']
    _fields = {}
    type = 'request'
    # Name of the concrete request (set by subclasses).
    request = None
    # Whether the server should hold the request until data is available.
    block = False
    # Seconds to wait for a blocking request before giving up.
    timeout = 10
    # Server-side state: the response being prepared for this request.
    response = None
    # threading.Event created by wait(); set by signal() when dispatched.
    wait_event = None
    timed_out = False
    @server_side
    def dispatch(self):
        """
        In concrete subclasses this method will actually dispatch the request
        to the debugger host and return a response. In this case it raises an
        exception.
        """
        raise NotImplementedError("Subclass APIRequest")
    @server_side
    def wait(self):
        """
        Wait for the request to be dispatched.

        Blocks for up to `timeout` seconds (forever if timeout is falsy) and
        records whether the wait timed out in `self.timed_out`.
        """
        # NOTE(review): wait_event is created here, not in __init__, so a
        # signal() arriving before wait() would hit a None event -- confirm
        # the server guarantees wait() runs first.
        self.wait_event = threading.Event()
        timeout = int(self.timeout) if self.timeout else None
        self.timed_out = not self.wait_event.wait(timeout)
    def signal(self):
        """
        Signal that the request has been dispatched and can return.
        """
        self.wait_event.set()
class APIBlockingRequest(APIRequest):
    """
    An API request that blocks by default.

    Convenience base class: subclasses are held by the server until data is
    available, without callers having to pass block=True.
    """
    # Overrides APIRequest.block.
    block = True
class APIResponse(APIMessage):
    """
    Base class for API responses; holds accessors common to all response
    types.

    Used on both sides of the wire: the server builds one inside
    APIRequest.dispatch() to serialise back to the client, and the client
    materialises one from the reply in Client.send_request().
    """
    _top_fields = ['type', 'status']
    _fields = {}

    type = 'response'
    status = None

    @property
    def is_success(self):
        """True when the server reported success."""
        return self.status == 'success'

    @property
    def is_error(self):
        """True when the server reported an error."""
        return self.status == 'error'

    def __repr__(self):
        body = {f: getattr(self, f) for f in self._top_fields + list(self._fields.keys())}
        return "<%s: success = %s, error = %s, body: %s>" % (
            str(self.__class__),
            self.is_success,
            self.is_error,
            body
        )
class APISuccessResponse(APIResponse):
    """
    A generic API success response.
    """
    status = 'success'
class APIErrorResponse(APIResponse):
    """
    A generic API error response.

    Subclasses carry a numeric `code` and default `message`; both are
    required payload fields.
    """
    _fields = {'code': True, 'message': True}
    status = 'error'
    @property
    def timed_out(self):
        # Forward reference: APITimedOutErrorResponse is defined later in the
        # module, which is fine because this resolves at call time.
        return self.code == APITimedOutErrorResponse.code
# Concrete error responses. Each pairs a numeric protocol code with a default
# human-readable message; clients dispatch on `code`.
class APIGenericErrorResponse(APIErrorResponse):
    code = 0x1000
    message = "An error occurred"
    def __init__(self, message=None):
        super(APIGenericErrorResponse, self).__init__()
        # A falsy message (None or '') keeps the class-level default.
        if message:
            self.message = message
class APIInvalidRequestErrorResponse(APIErrorResponse):
    code = 0x1001
    message = "Invalid API request"
class APIPluginNotFoundErrorResponse(APIErrorResponse):
    code = 0x1002
    message = "Plugin was not found for request"
class APIDebuggerHostNotSupportedErrorResponse(APIErrorResponse):
    code = 0x1003
    message = "The targeted debugger host is not supported by this plugin"
class APITimedOutErrorResponse(APIErrorResponse):
    code = 0x1004
    message = "The request timed out"
class APIDebuggerNotPresentErrorResponse(APIErrorResponse):
    # NOTE(review): duplicates APITimedOutErrorResponse's 0x1004, so
    # APIErrorResponse.timed_out wrongly reports True for this error --
    # confirm and assign a unique code (renumbering here would change the
    # wire protocol, so it is only flagged, not fixed).
    code = 0x1004
    message = "No debugger host was found"
class APINoSuchTargetErrorResponse(APIErrorResponse):
    code = 0x1005
    message = "No such target"
class APITargetBusyErrorResponse(APIErrorResponse):
    code = 0x1006
    message = "Target busy"
class APIMissingFieldErrorResponse(APIGenericErrorResponse):
    code = 0x1007
    message = "Missing field"
class APIEmptyResponseErrorResponse(APIGenericErrorResponse):
    code = 0x1008
    message = "Empty response"
class APIServerNotRunningErrorResponse(APIGenericErrorResponse):
    code = 0x1009
    message = "Server is not running"
| mit |
ondra-novak/chromium.src | build/android/gyp/push_libraries.py | 6 | 2326 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pushes native libraries to a device.
"""
import optparse
import os
import sys
# Make build/android importable so the pylib and util packages resolve
# regardless of the current working directory.
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), os.pardir)
sys.path.append(BUILD_ANDROID_DIR)
from pylib import constants
from util import build_device
from util import build_utils
from util import md5_check
def DoPush(options):
  """Pushes each native library to the device, skipping unchanged files.

  Per-library md5 stamp files (keyed on host path and device serial) let
  md5_check skip libraries whose contents and destination are unchanged.
  """
  libraries = build_utils.ReadJson(options.libraries_json)
  device = build_device.GetBuildDeviceFromPath(
      options.build_device_configuration)
  if not device:
    # No configured device; nothing to push.
    return
  serial_number = device.GetSerialNumber()
  # A single-element list rather than a bool so Push (a nested function) can
  # clear it: Python 2 has no `nonlocal`, but mutating the list works.
  needs_directory = [True]
  for lib in libraries:
    device_path = os.path.join(options.device_dir, lib)
    host_path = os.path.join(options.libraries_dir, lib)
    # Push closes over this iteration's host_path/device_path; it is invoked
    # immediately below (within the same iteration), so no late-binding issue.
    def Push():
      if needs_directory:
        device.RunShellCommand('mkdir -p ' + options.device_dir)
        needs_directory[:] = [] # = False
      device.PushChangedFiles(host_path, device_path)
    # Stamp is per-library and per-device so switching devices re-pushes.
    record_path = '%s.%s.push.md5.stamp' % (host_path, serial_number)
    md5_check.CallAndRecordIfStale(
        Push,
        record_path=record_path,
        input_paths=[host_path],
        input_strings=[device_path])
def main():
  """Parses command-line flags, pushes libraries, then touches the stamp."""
  parser = optparse.OptionParser()
  for flag, help_text in (
      ('--libraries-dir', 'Directory that contains stripped libraries.'),
      ('--device-dir', 'Device directory to push the libraries to.'),
      ('--libraries-json', 'Path to the json list of native libraries.'),
      ('--stamp', 'Path to touch on success.'),
      ('--build-device-configuration', 'Path to build device configuration.'),
      ('--configuration-name', 'The build CONFIGURATION_NAME')):
    parser.add_option(flag, help=help_text)
  options, _ = parser.parse_args()
  build_utils.CheckOptions(
      options, parser,
      required=['libraries_dir', 'device_dir', 'libraries_json'])
  constants.SetBuildType(options.configuration_name)
  DoPush(options)
  if options.stamp:
    build_utils.Touch(options.stamp)


if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
dhalperi/incubator-beam | sdks/python/apache_beam/utils/retry.py | 9 | 8086 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Retry decorators for calls raising exceptions.
For internal use only; no backwards-compatibility guarantees.
This module is used mostly to decorate all integration points where the code
makes calls to remote services. Searching through the code base for @retry
should find all such places. For this reason even places where retry is not
needed right now use a @retry.no_retries decorator.
"""
import logging
import random
import sys
import time
import traceback
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
# TODO(sourabhbajaj): Remove the GCP specific error code to a submodule
try:
  from apitools.base.py.exceptions import HttpError
except ImportError:
  # apitools (a GCP dependency) is optional; HttpError = None tells the retry
  # filters below to skip the HTTP-status-specific checks.
  HttpError = None
# pylint: enable=wrong-import-order, wrong-import-position
class PermanentException(Exception):
  """Base class for exceptions that should not be retried.

  Deriving an error from this class makes retry_on_server_errors_filter
  fail immediately instead of retrying.
  """
  pass
class FuzzedExponentialIntervals(object):
  """Iterable for intervals that are exponentially spaced, with fuzzing.

  On iteration, yields retry interval lengths, in seconds. Every iteration over
  this iterable will yield differently fuzzed interval lengths, as long as fuzz
  is nonzero.

  Args:
    initial_delay_secs: The delay before the first retry, in seconds.
    num_retries: The total number of times to retry.
    factor: The exponential factor to use on subsequent retries.
      Default is 2 (doubling).
    fuzz: A value between 0 and 1, indicating the fraction of fuzz. For a
      given delay d, the fuzzed delay is randomly chosen between
      [(1 - fuzz) * d, d].
    max_delay_secs: Maximum delay (in seconds). After this limit is reached,
      further tries use max_delay_sec instead of exponentially increasing
      the time. Defaults to 1 hour.

  Raises:
    ValueError: if fuzz is outside the [0, 1] range.
  """

  def __init__(self, initial_delay_secs, num_retries, factor=2, fuzz=0.5,
               max_delay_secs=60 * 60 * 1):
    self._initial_delay_secs = initial_delay_secs
    self._num_retries = num_retries
    self._factor = factor
    if not 0 <= fuzz <= 1:
      raise ValueError('Fuzz parameter expected to be in [0, 1] range.')
    self._fuzz = fuzz
    self._max_delay_secs = max_delay_secs

  def __iter__(self):
    # The first delay is clamped too, in case initial > max.
    current_delay_secs = min(self._max_delay_secs, self._initial_delay_secs)
    # range() instead of the Python-2-only xrange(): num_retries is small, so
    # materialising a list under Python 2 costs nothing, and the class also
    # works when imported under Python 3.
    for _ in range(self._num_retries):
      fuzz_multiplier = 1 - self._fuzz + random.random() * self._fuzz
      yield current_delay_secs * fuzz_multiplier
      current_delay_secs = min(
          self._max_delay_secs, current_delay_secs * self._factor)
def retry_on_server_errors_filter(exception):
  """Return True if *exception* warrants a retry.

  HTTP errors are retried only for 5xx status codes; non-HTTP errors are
  retried unless they derive from PermanentException.
  """
  is_http_error = HttpError is not None and isinstance(exception, HttpError)
  if is_http_error:
    return exception.status_code >= 500
  return not isinstance(exception, PermanentException)
def retry_on_server_errors_and_timeout_filter(exception):
  """Like retry_on_server_errors_filter, but also retries 408 Request Timeout."""
  if (HttpError is not None and isinstance(exception, HttpError)
      and exception.status_code == 408):  # 408 Request Timeout
    return True
  return retry_on_server_errors_filter(exception)
# HTTP status codes treated as retryable: 408 Request Timeout, common 5xx
# server errors, and the unofficial 598/599 network-timeout codes.
SERVER_ERROR_OR_TIMEOUT_CODES = [408, 500, 502, 503, 504, 598, 599]
class Clock(object):
  """Minimal clock abstraction so tests can substitute a fake sleeper."""

  def sleep(self, value):
    """Block the calling thread for *value* seconds."""
    time.sleep(value)
def no_retries(fun):
  """A retry decorator for places where we do not want retries."""
  decorator = with_exponential_backoff(retry_filter=lambda _: False, clock=None)
  return decorator(fun)
def with_exponential_backoff(
    num_retries=7, initial_delay_secs=5.0, logger=logging.warning,
    retry_filter=retry_on_server_errors_filter,
    clock=Clock(), fuzz=True, factor=2, max_delay_secs=60 * 60):
  """Decorator with arguments that control the retry logic.

  Args:
    num_retries: The total number of times to retry.
    initial_delay_secs: The delay before the first retry, in seconds.
    logger: A callable used to report an exception. Must have the same signature
      as functions in the standard logging module. The default is
      logging.warning.
    retry_filter: A callable getting the exception raised and returning True
      if the retry should happen. For instance we do not want to retry on
      404 Http errors most of the time. The default value will return true
      for server errors (HTTP status code >= 500) and non Http errors.
    clock: A clock object implementing a sleep method. The default clock will
      use time.sleep().
    fuzz: True if the delay should be fuzzed (default). During testing False
      can be used so that the delays are not randomized.
    factor: The exponential factor to use on subsequent retries.
      Default is 2 (doubling).
    max_delay_secs: Maximum delay (in seconds). After this limit is reached,
      further tries use max_delay_sec instead of exponentially increasing
      the time. Defaults to 1 hour.

  Returns:
    As per Python decorators with arguments pattern returns a decorator
    for the function which in turn will return the wrapped (decorated) function.

  The decorator is intended to be used on callables that make HTTP or RPC
  requests that can temporarily timeout or have transient errors. For instance
  the make_http_request() call below will be retried 16 times with exponential
  backoff and fuzzing of the delay interval (default settings).

  from apache_beam.utils import retry
  # ...
  @retry.with_exponential_backoff()
  make_http_request(args)
  """
  # Note: `clock=Clock()` is a shared default instance; Clock is stateless,
  # so sharing it across decorated functions is harmless.
  def real_decorator(fun):
    """The real decorator whose purpose is to return the wrapped function."""
    def wrapper(*args, **kwargs):
      # A fresh (re-fuzzed) interval sequence for every call.
      retry_intervals = iter(
          FuzzedExponentialIntervals(
              initial_delay_secs, num_retries, factor,
              fuzz=0.5 if fuzz else 0, max_delay_secs=max_delay_secs))
      while True:
        try:
          return fun(*args, **kwargs)
        except Exception as exn: # pylint: disable=broad-except
          if not retry_filter(exn):
            raise
          # Get the traceback object for the current exception. The
          # sys.exc_info() function returns a tuple with three elements:
          # exception type, exception value, and exception traceback.
          exn_traceback = sys.exc_info()[2]
          try:
            try:
              # Python 2 iterator protocol (next() method); py3 uses next().
              sleep_interval = retry_intervals.next()
            except StopIteration:
              # Re-raise the original exception since we finished the retries.
              # Python-2-only three-expression raise keeps the original
              # traceback attached.
              raise exn, None, exn_traceback # pylint: disable=raising-bad-type
            logger(
                'Retry with exponential backoff: waiting for %s seconds before '
                'retrying %s because we caught exception: %s '
                'Traceback for above exception (most recent call last):\n%s',
                sleep_interval,
                getattr(fun, '__name__', str(fun)),
                ''.join(traceback.format_exception_only(exn.__class__, exn)),
                ''.join(traceback.format_tb(exn_traceback)))
            clock.sleep(sleep_interval)
          finally:
            # Traceback objects in locals can cause reference cycles that will
            # prevent garbage collection. Clear it now since we do not need
            # it anymore. (sys.exc_clear() is Python-2-only.)
            sys.exc_clear()
            exn_traceback = None
    return wrapper
  return real_decorator
| apache-2.0 |
SrNetoChan/Quantum-GIS | python/plugins/processing/algs/grass7/ext/v_net.py | 45 | 4994 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_net.py
--------
Date : December 2015
Copyright : (C) 2015 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
This Python module handles pre-treatment operations for v.net.* GRASS7 modules.
Before using a v.net module you often have to incorporate a points layer into
the network vector map.
"""
__author__ = 'Médéric Ribreux'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Médéric Ribreux'
import os
from qgis.core import QgsProcessingException
from processing.tools.system import getTempFilename
def incorporatePoints(alg, parameters, context, feedback, pointLayerName='points', networkLayerName='input'):
    """
    incorporate points with lines to form a GRASS network

    Mutates ``alg``: appends the v.net/v.db.connect commands, removes the
    consumed parameters and redirects the network input to the combined
    temporary layer, then runs the module's command.

    :param alg: the GRASS7 algorithm wrapper being executed.
    :param parameters: algorithm parameters.
    :param context: processing context.
    :param feedback: processing feedback object.
    :param pointLayerName: name of the points input parameter.
    :param networkLayerName: name of the lines (network) input parameter.
    """
    # Grab the point layer and delete this parameter
    pointLayer = alg.parameterAsVectorLayer(parameters, pointLayerName, context)
    if pointLayer:
        # Create an intermediate GRASS layer which is the combination of network + centers
        intLayer = 'net' + os.path.basename(getTempFilename())
        pointLayer = alg.exportedLayers[pointLayerName]

        # Grab the network layer
        lineLayer = alg.parameterAsVectorLayer(parameters, networkLayerName, context)
        if lineLayer:
            lineLayer = alg.exportedLayers[networkLayerName]
        else:
            raise QgsProcessingException(
                alg.tr('GRASS GIS 7 v.net requires a lines layer!'))

        threshold = alg.parameterAsDouble(parameters, 'threshold', context)
        # Create the v.net connect command for point layer integration
        command = 'v.net -s input={} points={} output={} operation=connect threshold={}'.format(
            lineLayer, pointLayer, intLayer, threshold)
        alg.commands.append(command)

        # Connect the point layer database to the layer 2 of the network
        command = 'v.db.connect -o map={} table={} layer=2'.format(intLayer, pointLayer)
        alg.commands.append(command)

        # remove undesired parameters
        alg.removeParameter(pointLayerName)
        # Use temp layer for input
        alg.exportedLayers[networkLayerName] = intLayer

    # Process the command
    # 'threshold' was only needed to build the v.net command above; it must
    # not be forwarded to the wrapped GRASS module.
    if 'threshold' in parameters:
        alg.removeParameter('threshold')

    alg.processCommand(parameters, context, feedback)
def variableOutput(alg, layers, parameters, context, nocats=True):
    """Export the variable data outputs of a v.net module.

    :param layers: dict mapping an output name (as declared in the
        algorithm description file) onto a 4-item list:
        [grass_source_layer, grass_data_type, grass_layer_number,
        export_without_categories]. Values that are not lists are skipped.
    :param parameters: algorithm parameters.
    :param context: processing context.
    :param nocats: unused here; kept for call compatibility.
    """
    for output_name, spec in layers.items():
        # Only list-shaped specs describe a vector output to export.
        if not isinstance(spec, list):
            continue
        destination = alg.parameterAsOutputLayer(parameters, output_name, context)
        src_layer, data_type, layer_number, export_nocat = spec[:4]
        grass_name = '{}{}'.format(src_layer, alg.uniqueSuffix)
        alg.exportVectorLayer(grassName=grass_name,
                              fileName=destination,
                              layer=layer_number,
                              exportnocat=export_nocat,
                              dataType=data_type)
def processOutputs(alg, parameters, context, feedback):
    """Export the outputs appropriate for the selected v.net operation.

    :param alg: the GRASS7 algorithm wrapper being executed.
    :param parameters: algorithm parameters.
    :param context: processing context.
    :param feedback: processing feedback object (unused here, kept for the
        processOutputs hook signature).
    """
    idx = alg.parameterAsInt(parameters, 'operation', context)
    operations = alg.parameterDefinition('operation').options()
    operation = operations[idx]
    # Output description per handled operation:
    # [grass_source_layer, grass_data_type, grass_layer_number, export_nocats]
    output_specs = {
        'nodes': {'output': ['output', 'point', 2, True]},
        'connect': {'output': ['output', 'line', 1, False]},
        'arcs': {'output': ['output', 'line', 1, True]},
    }
    # Bug fix: the original if/elif chain left ``outputParameter`` undefined
    # for any other operation (v.net also offers e.g. report/nreport) and
    # crashed with a NameError; unknown operations now simply export nothing.
    variableOutput(alg, output_specs.get(operation, {}), parameters, context)
| gpl-2.0 |
elingg/tensorflow | tensorflow/contrib/training/python/training/device_setter.py | 38 | 3951 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategies for placing variables on parameter servers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import tensor_shape
class GreedyLoadBalancingStrategy(object):
  """Greedy `ps_strategy`: place each op on the currently least-loaded task.

  Load is measured by a caller-supplied function and has no fixed units;
  the load function only needs to be internally consistent.  Because a
  running per-task total is kept and each op is placed greedily as it is
  processed, the placement is very sensitive to the exact order in which
  ps ops (typically variables) are created.

  Intended to be used as a `ps_strategy` in
  `tf.train.replica_device_setter`; `byte_size_load_fn` below is one
  reasonable load function.
  """

  def __init__(self, num_tasks, load_fn):
    """Create a new `GreedyLoadBalancingStrategy`.

    Args:
      num_tasks: Number of ps tasks to cycle among.
      load_fn: A callable that takes an `Operation` and returns a
        numeric load value for that op.
    """
    self._num_tasks = num_tasks
    self._load_fn = load_fn
    self._ps_loads = np.zeros(num_tasks)

  def __call__(self, op):
    """Choose a ps task index for the given `Operation`.

    Args:
      op: A `Operation` to be placed on ps.

    Returns:
      The index of the ps task with the smallest accumulated load so far;
      that task's load is then increased by `load_fn(op)`.
    """
    # np.argmin resolves ties toward the lowest task index.
    chosen = np.argmin(self._ps_loads)
    self._ps_loads[chosen] += self._load_fn(op)
    return chosen
def byte_size_load_fn(op):
  """Load function that computes the byte size of a single-output `Operation`.

  Intended for `"Variable"` ops, whose single `Tensor` output holds the
  variable's contents, but usable for any op with exactly one output.
  Pairs with `GreedyLoadBalancingStrategy`.

  Args:
    op: An `Operation` with a single output, typically a "Variable" op.

  Returns:
    The number of bytes in the output `Tensor`.

  Raises:
    ValueError: if `op` does not have a single output, or if the shape of the
      single output is not fully-defined.
  """
  if len(op.outputs) != 1:
    raise ValueError("Op %s must have a single output" % op)
  out = op.outputs[0]
  shape = out.get_shape()
  if not shape.is_fully_defined():
    # Due to legacy behavior, scalar "Variable" ops have output Tensors that
    # have unknown shape when the op is created (and hence passed to this
    # load function for placement), even though the scalar shape is set
    # explicitly immediately afterward; fall back to the op's "shape" attr.
    shape = tensor_shape.TensorShape(op.get_attr("shape"))
    shape.assert_is_fully_defined()
  return shape.num_elements() * out.dtype.size
| apache-2.0 |
bayesimpact/bob-emploi | data_analysis/importer/test/city_locations_test.py | 1 | 1260 | """Tests for the bob_emploi.importer.city_locations module."""
from os import path
import unittest
from bob_emploi.data_analysis.lib import mongo
from bob_emploi.data_analysis.importer import city_locations
from bob_emploi.frontend.api import geo_pb2
class CityLocationsImporterTestCase(unittest.TestCase):
    """Unit tests for the city locations importer."""

    test_data_folder = path.join(path.dirname(__file__), 'testdata')
    stats_filename = path.join(test_data_folder, 'french_cities.csv')
    urban_context_filename = path.join(test_data_folder, 'geo/french_urban_areas.xls')

    def test_csv2dicts(self) -> None:
        """Test basic usage of the csv2dicts function."""
        collection = city_locations.csv2dicts(
            self.stats_filename, self.urban_context_filename)
        city_protos = dict(
            mongo.collection_to_proto_mapping(collection, geo_pb2.FrenchCity))
        self.assertEqual(11, len(city_protos))

        # Spot-check one city's coordinates.
        dole = city_protos['39001']
        self.assertAlmostEqual(47.0667, dole.latitude, places=5)
        self.assertAlmostEqual(5.38333, dole.longitude, places=5)
        # Spot-check the urban context of another city.
        self.assertEqual(geo_pb2.PERIURBAN, city_protos['01002'].urban_context)
if __name__ == '__main__':
    # Allows running this test module directly.
    unittest.main()
| gpl-3.0 |
shaufi/odoo | openerp/exceptions.py | 312 | 3157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core exceptions.
This module defines a few exception types. Those types are understood by the
RPC layer. Any other exception type bubbling until the RPC layer will be
treated as a 'Server error'.
If you consider introducing new exceptions, check out the test_exceptions addon.
"""
# kept for backward compatibility
class except_orm(Exception):
    """Legacy base exception carrying a (name, value) pair.

    Kept for backward compatibility; the RPC layer (see module docstring)
    relies on ``name``/``value`` and on ``args`` holding the same pair.
    """
    def __init__(self, name, value):
        # Exception.__init__ stores the pair in self.args, matching the
        # historical explicit ``self.args = (name, value)`` assignment.
        super(except_orm, self).__init__(name, value)
        self.name = name
        self.value = value
class Warning(Exception):
    """Exception type understood by the RPC layer (see module docstring).

    NOTE: this shadows the ``Warning`` builtin inside this module; importers
    get ``openerp.exceptions.Warning``, not the builtin warning category.
    """
    pass
class RedirectWarning(Exception):
    """ Warning with a possibility to redirect the user instead of simply
    displaying the warning message.

    Should receive as parameters:
      :param int action_id: id of the action where to perform the redirection
      :param string button_text: text to put on the button that will trigger
          the redirection.
    """
class AccessDenied(Exception):
    """ Login/password error. No message, no traceback. """

    def __init__(self):
        # A fixed message and an empty exc_info-style triple, per the
        # class docstring.
        self.traceback = ('', '', '')
        super(AccessDenied, self).__init__('Access denied.')
class AccessError(except_orm):
    """ Access rights error. """
    def __init__(self, msg):
        # 'AccessError' becomes self.name (see except_orm), the tag sent
        # over RPC; msg becomes self.value.
        super(AccessError, self).__init__('AccessError', msg)
class MissingError(except_orm):
    """ Missing record(s). """
    def __init__(self, msg):
        # 'MissingError' becomes self.name (see except_orm); msg is the detail.
        super(MissingError, self).__init__('MissingError', msg)
class ValidationError(except_orm):
    def __init__(self, msg):
        # NOTE(review): the name tag is 'ValidateError', not 'ValidationError';
        # presumably kept for RPC/client backward compatibility -- confirm
        # before ever changing it.
        super(ValidationError, self).__init__('ValidateError', msg)
class DeferredException(Exception):
    """ Exception object holding a traceback for asynchronous reporting.

    Some RPC calls (database creation and report generation) happen with
    an initial request followed by multiple, polling requests. This class
    is used to store the possible exception occurring in the thread serving
    the first request, and is then sent to a polling request.

    ('Traceback' is misleading, this is really a exc_info() triple.)
    """
    def __init__(self, msg, tb):
        self.message = msg
        self.traceback = tb  # an exc_info() triple, per the class docstring
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bootandy/sqlalchemy | test/orm/test_immediate_load.py | 33 | 1693 | """basic tests of lazy loaded attributes"""
from sqlalchemy import testing
from sqlalchemy.orm import mapper, relationship, create_session, immediateload
from sqlalchemy.testing import eq_
from test.orm import _fixtures
class ImmediateTest(_fixtures.FixtureTest):
    """Exercises the 'immediate' relationship loading strategy."""

    run_inserts = 'once'
    run_deletes = None

    def test_basic_option(self):
        # immediateload() applied as a per-query option.
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(Address, addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address)
        })
        sess = create_session()

        result = sess.query(User).options(
            immediateload(User.addresses)).filter(users.c.id == 7).all()
        # The user and its single address are both loaded up front.
        eq_(len(sess.identity_map), 2)

        sess.close()

        eq_(
            [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])],
            result
        )

    def test_basic(self):
        # lazy='immediate' configured directly on the mapper.
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(Address, addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address, lazy='immediate')
        })
        sess = create_session()

        result = sess.query(User).filter(users.c.id == 7).all()
        # The user and its single address are both loaded up front.
        eq_(len(sess.identity_map), 2)

        sess.close()

        eq_(
            [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])],
            result
        )
| mit |
thydeyx/LeetCode-Python | Max_Points_on_a_Line.py | 1 | 1237 | # -*- coding:utf-8 -*-
import sys
"""
主要分为两种情况,排列在x垂直的直线上,斜率相同时
"""
class Point(object):
    """A 2-D point with ``x``/``y`` coordinates, both defaulting to 0."""
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y
class Solution(object):
    """Max Points on a Line: find the largest number of points on one line.

    Vertical lines (equal x) are counted via a cumulative per-x counter;
    all other lines via per-anchor slope counting, with coincident points
    folded into every slope through the anchor.
    """

    def cross(self, p1, p2):
        """Return the z component of the 2-D cross product of vectors p1, p2."""
        return p1.x * p2.y - p2.x * p1.y

    def onSegment(self, p1, p2, p3):
        """Return True when p3 is collinear with p1 and p2.

        Bug fix: the original called the bare name ``cross`` (a NameError at
        runtime) and subtracted Point objects, which define no ``__sub__``.
        The cross product of (p3 - p1) and (p3 - p2) is now expanded on the
        raw coordinates.
        """
        return (p3.x - p1.x) * (p3.y - p2.y) - (p3.x - p2.x) * (p3.y - p1.y) == 0

    def maxPoints(self, points):
        """Return the maximum number of given points lying on a single line.

        Slopes are compared as floats, so extreme coordinates may collide
        in precision; behavior matches the original implementation.
        """
        best = 0
        # Cumulative count of anchors per x coordinate; after the outer loop
        # has visited every point, count_per_x[x] is the size of the vertical
        # line at x (coincident points always share their x, so duplicates
        # are covered here too).
        count_per_x = {}
        for p1 in points:
            duplicates = 0   # points at exactly p1's coordinates
            slope_counts = {}  # slope -> points on that line through p1 (incl. p1)
            for p2 in points:
                if p1 is p2:
                    # Matches the original ``p1 == p2`` check: Point defines
                    # no __eq__, so == compared identity.
                    continue
                if p1.x != p2.x:
                    k = ((p1.y - p2.y) * 1.0) / ((p1.x - p2.x) * 1.0)
                    # setdefault(k, 1) seeds the count with the anchor itself.
                    slope_counts[k] = slope_counts.setdefault(k, 1) + 1
                elif p1.y == p2.y:
                    duplicates += 1
            # Coincident points lie on every line through the anchor.
            # (Plain iteration replaces the Python-2-only iterkeys().)
            for k in slope_counts:
                slope_counts[k] += duplicates
                if slope_counts[k] > best:
                    best = slope_counts[k]
            count_per_x[p1.x] = count_per_x.get(p1.x, 0) + 1
            if count_per_x[p1.x] > best:
                best = count_per_x[p1.x]
        return best
if __name__ == "__main__":
s = Solution()
point1 = Point(1, 1)
point2 = Point(1, 1)
point3 = Point(2, 2)
point4 = Point(2, 2)
points = [point1, point2, point3, point4]
#points = [point2]
print s.maxPoints(points)
| mit |
ICYrom/android_external_gtest | test/gtest_throw_on_failure_test.py | 21 | 5778 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import unittest
# Constants.

# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'

# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = os.path.join(gtest_test_utils.GetBuildDir(),
                        'gtest_throw_on_failure_test_')


# Utilities.
def SetEnvVar(env_var, value):
  """Sets an environment variable (name upper-cased) to a given value;
  unsets it when the given value is None.
  """

  name = env_var.upper()
  if value is None:
    # pop() tolerates the variable being absent already.
    os.environ.pop(name, None)
  else:
    os.environ[name] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""

  # Parenthesized print of a single argument behaves identically under
  # Python 2 and Python 3; the original bare print statement was a
  # SyntaxError under Python 3.
  print('Running "%s". . .' % ' '.join(command))
  return gtest_test_utils.Subprocess(command).exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(unittest.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
        variable; None if the variable should be unset.
        (Doc fix: the original text said GTEST_BREAK_ON_FAILURE, a
        copy-paste from the break-on-failure test.)
      flag_value: value of the --gtest_throw_on_failure flag;
        None if the flag should not be present.
      should_fail: True iff the program is expected to fail.
    """

    SetEnvVar(THROW_ON_FAILURE, env_var_value)

    # Human-readable description of the environment for the failure message.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    failed = not Run(command)

    # Restore the environment so one test case cannot affect the next.
    SetEnvVar(THROW_ON_FAILURE, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
if __name__ == '__main__':
  # gtest_test_utils.Main() runs the unittest runner with gtest's flags.
  gtest_test_utils.Main()
| bsd-3-clause |
vially/xbmc-decorator | lib/decorator.py | 11 | 9345 | ########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
# Package metadata: version string and the names exported by
# ``from decorator import *``.
__version__ = '3.3.3'

__all__ = ["decorator", "FunctionMaker", "partial"]
import sys, re, inspect
try:
    from functools import partial
except ImportError:  # for Python version < 2.5
    class partial(object):
        "A simple replacement of functools.partial"
        def __init__(self, func, *args, **kw):
            self.func = func
            self.args = args
            self.keywords = kw
        def __call__(self, *otherargs, **otherkw):
            # Keyword arguments given at call time override the pre-bound
            # ones, matching functools.partial semantics.
            kw = self.keywords.copy()
            kw.update(otherkw)
            return self.func(*(self.args + otherargs), **kw)
if sys.version >= '3':
    from inspect import getfullargspec
else:
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2.X"
        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            # Python 2 has no keyword-only arguments; expose empty values so
            # callers can treat both implementations uniformly.
            self.kwonlyargs = []
            self.kwonlydefaults = None
        def __iter__(self):
            # Allows tuple-unpacking like an inspect.getargspec() result.
            yield self.args
            yield self.varargs
            yield self.varkw
            yield self.defaults
# Matches the header of a function definition and captures the function name.
# A raw string avoids invalid-escape-sequence warnings for the \s and \w
# classes (the pattern itself is unchanged).
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')

# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.
    """
    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>':  # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                if sys.version < '3':  # easy way
                    # formatvalue="" drops the default values, leaving only
                    # the bare argument list between the parentheses.
                    self.shortsignature = self.signature = \
                        inspect.formatargspec(
                            formatvalue=lambda val: "", *argspec)[1:-1]
                else:  # Python 3 way
                    # Rebuild the signature by hand so keyword-only
                    # arguments and annotations survive.
                    self.signature = self.shortsignature = ', '.join(self.args)
                    if self.varargs:
                        self.signature += ', *' + self.varargs
                        self.shortsignature += ', *' + self.varargs
                    if self.kwonlyargs:
                        for a in self.kwonlyargs:
                            self.signature += ', %s=None' % a
                            self.shortsignature += ', %s=%s' % (a, a)
                    if self.varkw:
                        self.signature += ', **' + self.varkw
                        self.shortsignature += ', **' + self.varkw
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)

    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        # Python 2 spelling of __defaults__.
        func.func_defaults = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        # Frame 3 is the caller of the decorator factory; its module name is
        # used as a fallback when no explicit module was recorded.
        callermodule = sys._getframe(3).f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)

    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self)  # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1)  # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            # _func_ and _call_ are reserved for the generated wrapper body.
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'):  # add a newline just for safety
            src += '\n'  # this is needed in old versions of Python
        try:
            code = compile(src, '<string>', 'single')
            # print >> sys.stderr, 'Compiling %s' % src
            # Python 2 exec statement: defines `name` inside evaldict.
            exec code in evaldict
        except:
            print >> sys.stderr, 'Error in generated code:'
            print >> sys.stderr, src
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func

    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature and body.
        evaldict is the evaluation dictionary. If addsource is true an attribute
        __source__ is added to the result. The attributes attrs are added,
        if any.
        """
        if isinstance(obj, str):  # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip a right parens
            func = None
        else:  # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        # Indent the body one level under the generated `def` line.
        ibody = '\n'.join(' ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
def decorator(caller, func=None):
    """
    decorator(caller) converts a caller function into a decorator;
    decorator(caller, func) decorates a function using a caller.
    """
    if func is not None:  # returns a decorated function
        # func_globals is the Python 2 spelling of __globals__; the wrapper
        # is generated in a copy so the originals are not polluted.
        evaldict = func.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['_func_'] = func
        return FunctionMaker.create(
            func, "return _call_(_func_, %(shortsignature)s)",
            evaldict, undecorated=func, __wrapped__=func)
    else:  # returns a decorator
        if isinstance(caller, partial):
            return partial(decorator, caller)
        # otherwise assume caller is a function
        first = inspect.getargspec(caller)[0][0]  # first arg
        evaldict = caller.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['decorator'] = decorator
        # The generated decorator reuses the caller's name and first
        # argument name so introspection stays meaningful.
        return FunctionMaker.create(
            '%s(%s)' % (caller.__name__, first),
            'return decorator(_call_, %s)' % first,
            evaldict, undecorated=caller, __wrapped__=caller,
            doc=caller.__doc__, module=caller.__module__)
| bsd-2-clause |
taigaio/taiga-ncurses | taiga_ncurses/core.py | 3 | 4411 | # -*- coding: utf-8 -*-
"""
taiga_ncurses.core
~~~~~~~~~~~~~~~~~~
"""
import functools
from concurrent.futures import wait
import urwid
from taiga_ncurses.ui import views
from taiga_ncurses import controllers
from taiga_ncurses.config import settings
class TaigaCore:
    """Application core: owns the urwid main loop, the active controller
    and the state machine, and swaps views on state transitions."""

    def __init__(self, executor, settings, authenticated=False, draw=True):
        # executor: async task executor shared with controllers.
        # draw: when False, transitions skip screen redrawing (useful in tests).
        self.executor = executor
        self.settings = settings
        self.draw = draw

        if authenticated:
            self.state_machine = StateMachine(self, state=StateMachine.PROJECTS)
            self.controller = self._build_projects_controller()
        else:
            self.state_machine = StateMachine(self, state=StateMachine.LOGIN)
            self.controller = self._build_login_controller()

        # Main Loop
        self.loop = urwid.MainLoop(self.controller.view.widget,
                                   palette=self.settings.palette,
                                   unhandled_input=self.key_handler,
                                   handle_mouse=True,
                                   pop_ups=True)

    def run(self):
        """Start the urwid event loop (blocks until exit)."""
        self.loop.run()

    def key_handler(self, key):
        """Global fallback for keys no widget handled.

        NOTE: key bindings are read from the module-level ``settings``
        import, not from ``self.settings``.
        """
        if key == settings.data.main.keys.quit:
            self.settings.save()
            raise urwid.ExitMainLoop
        elif key == settings.data.main.keys.debug:
            self.debug()
        else:
            return self.controller.handle(key)

    def debug(self):
        # Suspend the curses screen so the interactive debugger owns the TTY.
        self.loop.screen.stop()
        import ipdb; ipdb.set_trace()
        self.loop.screen.start()

    def login_view(self):
        # Intentionally empty: the login controller is built in __init__.
        pass

    def projects_view(self):
        """Switch to the projects list view."""
        self.controller = self._build_projects_controller()
        self.transition()

    def project_view(self, project):
        """Switch to the detail view of the given project."""
        self.controller = self._build_project_controller(project)
        self.transition()

    def transition(self):
        """Install the current controller's widget and redraw (if enabled)."""
        if self.draw:
            self.loop.widget = self.controller.view.widget
            self.loop.widget._invalidate()
            self.loop.draw_screen()

    def set_auth_config(self, auth_data):
        # Persist the auth token received from a successful login.
        self.settings.data.auth.token = auth_data["auth_token"]

    def _build_login_controller(self):
        # Builds the login view/controller pair.
        login_view = views.auth.LoginView('username', 'password')
        login_controller = controllers.auth.LoginController(login_view,
                                                            self.executor,
                                                            self.state_machine)
        return login_controller

    def _build_projects_controller(self):
        # Builds the projects-list view/controller pair.
        projects_view = views.projects.ProjectsView()
        projects_controller = controllers.projects.ProjectsController(projects_view,
                                                                      self.executor,
                                                                      self.state_machine)
        return projects_controller

    def _build_project_controller(self, project):
        # Builds the project-detail view/controller pair.
        project_view = views.projects.ProjectDetailView(project)
        project_controller = controllers.projects.ProjectDetailController(project_view,
                                                                          self.executor,
                                                                          self.state_machine)
        return project_controller
class StateMeta(type):
    """Metaclass collecting every UPPERCASE class attribute value into a
    ``STATES`` set that is injected on the class being created."""

    def __new__(mcs, clsname, bases, dct):
        uppercase_names = [name for name in dct if name.isupper()]
        values = {dct[name] for name in uppercase_names}
        # A duplicated value would make two state names indistinguishable.
        assert len(uppercase_names) == len(values), "State attributes must be unique"
        dct["STATES"] = values
        return super().__new__(mcs, clsname, bases, dct)
class StateMachine(metaclass=StateMeta):
    """Application state machine; delegates view switching to the core.

    The uppercase attributes below are collected into ``STATES`` by
    StateMeta.
    """

    LOGIN = 0
    PROJECTS = 1
    PROJECT_BACKLOG = 2
    PROJECT_MILESTONES = 3
    PROJECT_ISSUES = 4
    PROJECT_WIKI = 5
    PROJECT_ADMIN = 6

    def __init__(self, core, state):
        # core: the TaigaCore instance that owns the views.
        self._core = core
        self.state = state

    def logged_in(self, auth_data):
        # Persist the auth token before switching to the projects view.
        self._core.set_auth_config(auth_data)
        self._core.projects_view()

    def projects(self):
        """Show the projects list."""
        self._core.projects_view()

    def project_detail(self, project):
        """Show the detail view of ``project``."""
        self._core.project_view(project)

    def transition(self, state):
        """Move to ``state`` (must be one of STATES) and redraw."""
        assert state in self.STATES, "{0} is not a valid state".format(state)
        self.state = state
        self.refresh()

    def refresh(self):
        # Ask the core to re-install the current controller's widget.
        self._core.transition()
| apache-2.0 |
schaul/py-vgdl | vgdl/stateobs.py | 3 | 8138 | '''
Created on 2013 3 13
@author: Tom Schaul (schaul@gmail.com)
Managing states and observations, for different types of games
'''
import pygame
from pybrain.utilities import setAllArgs
from ontology import RotatingAvatar, BASEDIRS, GridPhysics, ShootAvatar, kill_effects
from core import Avatar
from tools import listRotate
class StateObsHandler(object):
    """ Managing different types of state representations,
    and of observations.

    A state is always composed of a tuple, with the avatar position (x,y) in the first places.
    If the avatar orientation matters, the orientation is the third element of the tuple.
    """

    # is the avatar having an orientation or not?
    orientedAvatar = False

    # is the avatar a single persistent sprite, or can it be transformed?
    uniqueAvatar = True

    # can the avatar die?
    mortalAvatar = False

    # can other sprites die?
    mortalOther = False

    # can other sprites move
    staticOther = True

    def __init__(self, game, **kwargs):
        # kwargs may override any of the class-level capability flags above.
        setAllArgs(self, kwargs)
        self._game = game
        self._avatar_types = []
        self._abs_avatar_types = []
        self._other_types = []
        self._mortal_types = []
        # Classify every declared sprite type into avatar vs. non-avatar.
        for skey in sorted(game.sprite_constr):
            sclass, _, stypes = game.sprite_constr[skey]
            if issubclass(sclass, Avatar):
                # All but the last type name are abstract ancestors.
                self._abs_avatar_types += stypes[:-1]
                self._avatar_types += [stypes[-1]]
                if issubclass(sclass, RotatingAvatar) or issubclass(sclass, ShootAvatar):
                    self.orientedAvatar = True
            if skey not in game.sprite_groups:
                continue
            ss = game.sprite_groups[skey]
            if len(ss) == 0:
                continue
            if isinstance(ss[0], Avatar):
                assert issubclass(ss[0].physicstype, GridPhysics), \
                    'Not supported: Game must have grid physics, has %s'\
                    % (self._avatar.physicstype.__name__)
            else:
                self._other_types += [skey]
                if not ss[0].is_static:
                    self.staticOther = False

        assert self.staticOther, "not yet supported: all non-avatar sprites must be static. "

        # Concrete avatar types are those that are not abstract ancestors.
        self._avatar_types = sorted(set(self._avatar_types).difference(self._abs_avatar_types))
        self.uniqueAvatar = (len(self._avatar_types) == 1)
        #assert self.uniqueAvatar, 'not yet supported: can only have one avatar class'

        # determine mortality
        for skey, _, effect, _ in game.collision_eff:
            if effect in kill_effects:
                if skey in self._avatar_types+self._abs_avatar_types:
                    self.mortalAvatar = True
                if skey in self._other_types:
                    self.mortalOther = True
                    self._mortal_types += [skey]

        # retain observable features, and their colors
        self._obstypes = {}
        self._obscols = {}
        for skey in self._other_types:
            ss = game.sprite_groups[skey]
            self._obstypes[skey] = [self._sprite2state(sprite, oriented=False)
                                    for sprite in ss if sprite.is_static]
            self._obscols[skey] = ss[0].color

        if self.mortalOther:
            # Remember every starting position of a mortal sprite ("grave
            # points") so presence bits can be encoded/restored later.
            self._gravepoints = {}
            for skey in self._mortal_types:
                for s in self._game.sprite_groups[skey]:
                    self._gravepoints[(skey, self._rect2pos(s.rect))] = True

    @property
    def _avatar(self):
        # The single live avatar sprite, or None if it is currently dead.
        ss = self._game.getAvatars()
        assert len(ss) <= 1, 'Not supported: Only a single avatar can be used, found %s' % ss
        if len(ss) == 0:
            return None
        return ss[0]

    def setState(self, state):
        # Restore the game to the situation encoded by the state tuple
        # (inverse of getState).
        pos = (state[0]*self._game.block_size, state[1]*self._game.block_size)
        # no avatar?
        if self._avatar is None:
            assert self.mortalAvatar
            if self.uniqueAvatar:
                atype = self._avatar_types[0]
            else:
                atype = state[-1]
            self._game._createSprite([atype], pos)
        # bad avatar?
        if not self.uniqueAvatar:
            atype = state[-1]
            if self._avatar.name != atype:
                self._game.kill_list.append(self._avatar)
                self._game._createSprite([atype], pos)

        # Strip the optional trailing elements (avatar type, presence vector)
        # before handing the positional part to _setSpriteState.
        if not self.uniqueAvatar:
            state = state[:-1]
        if self.mortalOther:
            self._setPresences(state[-1])
            state = state[:-1]
        self._setSpriteState(self._avatar, state)
        self._avatar.lastrect = self._avatar.rect
        self._avatar.lastmove = 0

    def getState(self):
        # Encode the current situation as a hashable tuple; layout depends on
        # the mortalOther/uniqueAvatar capability flags.
        if self._avatar is None:
            return (-1,-1, 'dead')
        if self.mortalOther:
            if self.uniqueAvatar:
                return tuple(list(self._sprite2state(self._avatar)) + [self._getPresences()])
            else:
                return tuple(list(self._sprite2state(self._avatar))
                             + [self._getPresences()] + [self._avatar.name])
        else:
            if self.uniqueAvatar:
                return self._sprite2state(self._avatar)
            else:
                return tuple(list(self._sprite2state(self._avatar))
                             + [self._avatar.name])

    def _getPresences(self):
        """ Binary vector of which non-avatar sprites are present. """
        res = []
        for skey, pos in sorted(self._gravepoints):
            if pos in [self._rect2pos(s.rect) for s in self._game.sprite_groups[skey]
                       if s not in self._game.kill_list]:
                res.append(1)
            else:
                res.append(0)
        return tuple(res)

    def _setPresences(self, p):
        # Kill or resurrect sprites so the world matches presence vector p
        # (same ordering as _getPresences).
        for i, (skey, pos) in enumerate(sorted(self._gravepoints)):
            target = p[i] != 0
            matches = [s for s in self._game.sprite_groups[skey] if self._rect2pos(s.rect)==pos]
            current = (not len(matches) == 0 and matches[0] not in self._game.kill_list)
            if current == target:
                continue
            elif current:
                #print 'die', skey, pos, matches
                self._game.kill_list.append(matches[0])
            elif target:
                #print 'live', skey, pos, matches
                pos = (pos[0]*self._game.block_size, pos[1]*self._game.block_size)
                self._game._createSprite([skey], pos)

    def _rawSensor(self, state):
        # One boolean per observable type: is `state` one of its positions?
        return [(state in ostates) for _, ostates in sorted(self._obstypes.items())[::-1]]

    def _sprite2state(self, s, oriented=None):
        # (x, y) grid position, plus orientation when the avatar is oriented
        # and the caller did not explicitly disable it.
        pos = self._rect2pos(s.rect)
        if oriented is None and self.orientedAvatar:
            return (pos[0], pos[1], s.orientation)
        else:
            return pos

    def _rect2pos(self, r):
        # Pixel coordinates -> grid coordinates (Python 2: '/' is int division).
        return (r.left / self._game.block_size, r.top / self._game.block_size)

    def _setRectPos(self, s, pos):
        s.rect = pygame.Rect((pos[0] * self._game.block_size,
                              pos[1] * self._game.block_size),
                             (self._game.block_size, self._game.block_size))

    def _setSpriteState(self, s, state):
        if self.orientedAvatar:
            s.orientation = state[2]
        self._setRectPos(s, (state[0], state[1]))

    def _stateNeighbors(self, state):
        """ Can be different in subclasses...

        By default: current position and four neighbors. """
        pos = (state[0], state[1])
        ns = [(a[0] + pos[0], a[1] + pos[1]) for a in BASEDIRS]
        if self.orientedAvatar:
            # subjective perspective, so we rotate the view according to the current orientation
            ns = listRotate(ns, BASEDIRS.index(state[2]))
            return ns
        else:
            return ns
| bsd-3-clause |
ecosoft-odoo/odoo | addons/portal/mail_thread.py | 390 | 2004 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-TODAY OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv
class mail_thread(osv.AbstractModel):
    """ Update of mail_mail class, to add the signin URL to notifications. """
    _inherit = 'mail.thread'

    def _get_inbox_action_xml_id(self, cr, uid, context=None):
        """Return the (module, xml_id) pair of the inbox action for *uid*.

        Portal members get the portal-specific inbox action; all other users
        fall through to the standard behavior of the parent class.
        """
        users = self.pool.get('res.users')
        cur_user = users.browse(cr, SUPERUSER_ID, uid, context=context)
        portal_member = any(group.is_portal for group in cur_user.groups_id)
        if portal_member:
            return ('portal', 'action_mail_inbox_feeds_portal')
        return super(mail_thread, self)._get_inbox_action_xml_id(cr, uid, context=context)
| agpl-3.0 |
jbtule/keyczar-dev | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/mssdk.py | 19 | 2345 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py 4043 2009/02/23 09:06:45 scons"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from SCons.Tool.MSCommon.sdk import detect_sdk, \
set_default_sdk, \
set_sdk_by_directory, \
set_sdk_by_version
def generate(env):
    """Add construction variables for an MS SDK to an Environment.

    The first of MSSDK_DIR, MSSDK_VERSION, MSVS_VERSION present in *env*
    decides how the SDK is selected; later ones are ignored.
    """
    if env.has_key('MSSDK_DIR'):
        set_sdk_by_directory(env, env.subst('$MSSDK_DIR'))
    elif env.has_key('MSSDK_VERSION'):
        set_sdk_by_version(env, env.subst('$MSSDK_VERSION'))
    elif env.has_key('MSVS_VERSION'):
        # NOTE(review): reaching this branch without MSVS_VERSION set was
        # flagged as a likely bug in the original comments.
        set_default_sdk(env, env['MSVS_VERSION'])
def exists(env):
    """Report whether an MS SDK installation can be detected."""
    found = detect_sdk()
    return found
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
wooyek/django-tasker | manage.py | 1 | 1188 | #!/usr/bin/env python
import logging
import os
import sys
# Configure root logging as early as possible so every subsequent import logs
# in a consistent format.
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(thread)-5d %(name)s %(filename)s:%(lineno)s | %(funcName)s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
# Re-enable all levels in case logging was globally disabled elsewhere.
logging.disable(logging.NOTSET)
logging.debug("Importing: %s" % __file__)

from pathlib import Path

# Absolute directory containing this script; used below for sys.path setup.
SRC_PATH = str(Path(__file__).parent.absolute())
def filter_deprecation_warnings(record):
    """Logging filter that drops known deprecation-warning records.

    A record is suppressed when its message mentions one of the warning
    names below, unless the message starts with SRC_PATH (i.e. originates
    from our own source tree), in which case it is always kept.

    Parameters
    ----------
    record : logging.LogRecord
        The record being filtered.

    Returns
    -------
    bool
        True to keep the record, False to suppress it.
    """
    warnings_to_suppress = ('RemovedInDjango110Warning',)
    msg = record.getMessage()
    # Warnings pointing at our own code are never suppressed.
    if msg.startswith(SRC_PATH):
        return True
    # Generator avoids building an intermediate list and short-circuits.
    return not any(warn in msg for warn in warnings_to_suppress)
# Drop Django deprecation noise routed through the py.warnings logger.
logging.getLogger('py.warnings').addFilter(filter_deprecation_warnings)

if __name__ == "__main__":
    # Make the project importable regardless of the current working directory.
    if SRC_PATH not in sys.path:
        sys.path.append(SRC_PATH)
    logging.debug("PATH:\n\t%s", "\n\t".join(os.sys.path))
    # assert(os.environ.get('DJANGO_SETTINGS_MODULE') == 'tests.test_settings')
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.test_settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| mit |
nlandais/ansible-modules-core | cloud/amazon/ec2_vpc_net.py | 16 | 9898 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicated tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
'''
import time
import sys
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
    '''generic error message handler'''
    # Prefer the richer boto attributes when present, falling back to a
    # generic rendering of the exception.
    for attr in ('error_message', 'message'):
        if hasattr(err, attr):
            return getattr(err, attr)
    return '%s: %s' % (Exception, err)
def vpc_exists(module, vpc, name, cidr_block, multi):
"""Returns True or False in regards to the existence of a VPC. When supplied
with a CIDR, it will check for matching tags to determine if it is a match
otherwise it will assume the VPC does not exist and thus return false.
"""
matched_vpc = None
try:
matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if len(matching_vpcs) == 1:
matched_vpc = matching_vpcs[0]
elif len(matching_vpcs) > 1:
if multi:
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
    # Ensure the VPC's tags (always including 'Name') match the requested
    # set; returns True when tags were rewritten, False when already in sync.
    # NOTE(review): mutates the caller-supplied 'tags' dict by adding 'Name'.
    if tags is None:
        tags = dict()

    tags.update({'Name': name})
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
        # cmp() returns non-zero when the dicts differ (Python 2 only).
        if cmp(tags, current_tags):
            vpc.create_tags(vpc_obj.id, tags)
            return True
        else:
            return False
    except Exception, e:
        e_msg=boto_exception(e)
        module.fail_json(msg=e_msg)
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Associate dhcp_id with the VPC when needed; return whether anything changed."""
    if vpc_obj.dhcp_options_id == dhcp_id:
        return False
    connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
    return True
def get_vpc_values(vpc_obj):
    """Return a plain dict of the VPC object's attributes for module output.

    Strips non-serializable / noisy boto attributes ('region', 'item',
    'connection'). Works on a copy of __dict__: the original implementation
    popped from vpc_obj.__dict__ directly, silently deleting attributes from
    the live boto object. Returns None when vpc_obj is None.
    """
    if vpc_obj is None:
        return None
    vpc_values = vpc_obj.__dict__.copy()
    for key in ("region", "item", "connection"):
        vpc_values.pop(key, None)
    return vpc_values
def main():
    # Module entry point: create or delete a VPC according to the params.
    argument_spec=ec2_argument_spec()
    argument_spec.update(dict(
            name = dict(type='str', default=None, required=True),
            cidr_block = dict(type='str', default=None, required=True),
            tenancy = dict(choices=['default', 'dedicated'], default='default'),
            dns_support = dict(type='bool', default=True),
            dns_hostnames = dict(type='bool', default=True),
            dhcp_opts_id = dict(type='str', default=None, required=False),
            tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
            state = dict(choices=['present', 'absent'], default='present'),
            multi_ok = dict(type='bool', default=False)
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    # Unpack module parameters into locals.
    name=module.params.get('name')
    cidr_block=module.params.get('cidr_block')
    tenancy=module.params.get('tenancy')
    dns_support=module.params.get('dns_support')
    dns_hostnames=module.params.get('dns_hostnames')
    dhcp_id=module.params.get('dhcp_opts_id')
    tags=module.params.get('tags')
    state=module.params.get('state')
    multi=module.params.get('multi_ok')

    changed=False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    # AWS requires DNS support to be on before hostnames can be enabled.
    if dns_hostnames and not dns_support:
        module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')

    if state == 'present':

        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_obj is None:
            try:
                vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
                changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_obj, tags, name):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)

        # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
        # which is needed in order to detect the current status of DNS options. For now we just update
        # the attribute each time and is not used as a changed-factor.
        try:
            connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
            connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
        except BotoServerError, e:
            e_msg=boto_exception(e)
            module.fail_json(msg=e_msg)

        # get the vpc obj again in case it has changed
        try:
            vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
        except BotoServerError, e:
            e_msg=boto_exception(e)
            module.fail_json(msg=e_msg)

        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

    elif state == 'absent':

        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_obj is not None:
            try:
                connection.delete_vpc(vpc_obj.id)
                vpc_obj = None
                changed = True
            except BotoServerError, e:
                e_msg = boto_exception(e)
                module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                     "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)

        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
jdwittenauer/ionyx | ionyx/experiment.py | 1 | 19762 | import pickle
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import get_scorer
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import cross_validate, learning_curve, train_test_split
from .print_message import PrintMessageMixin
class Experiment(PrintMessageMixin):
"""
Provides functionality to create and run machine learning experiments. Designed to
serve as a "wrapper" for running an experiment. This class provides methods for
training, cross-validation, parameter tuning etc. The main value proposition is
in providing a simplified API for common tasks, layering useful reporting and logging
on top, and reconciling capabilities between several popular libraries.
Parameters
----------
package : {'sklearn', 'xgboost', 'keras', 'prophet'}
The source package of the model used in the experiment. Some capabilities are
available only using certain packages.
model : object
An instantiated supervised learning model. Must be API compatible with
scikit-learn estimators. Pipelines are also supported.
scoring_metric : string
Name of the metric to use to score models. Text must match a valid scikit-learn
metric.
eval_metric : string, optional, default None
Separate metric used specifically for evaluation such as hold-out sets during
training. Text must match an evaluation metric supported by the package the
model originates from.
n_jobs : int, optional, default 1
Number of parallel processes to use (where functionality is available).
verbose : boolean, optional, default True
If true, messages will be written to the console.
logger : object, optional, default None
An instantiated log writer with an open file handle. If provided, messages
will be written to the log file.
data : DataFrame, optional, default None
The data set to be used for the experiment. Provides the option to specify the
data at initialization vs. passing in training data and labels with each
function call. If "data" is specified then "X_columns" and "y_column" must also
be specified.
X_columns : list, optional, default None
List of columns in "data" to use for the training set.
y_column : string, optional, default None
Name of the column in "data" to use as a label for supervised learning.
cv : object, optional, default None
A cross-validation strategy. Accepts all options considered valid by
scikit-learn.
Attributes
----------
scorer_ : object
Scikit-learn scoring function for the provided scoring metric.
best_model_ : object
The best model found during a parameter search.
"""
def __init__(self, package, model, scoring_metric, eval_metric=None, n_jobs=1, verbose=True,
             logger=None, data=None, X_columns=None, y_column=None, cv=None):
    PrintMessageMixin.__init__(self, verbose, logger)
    self.package = package
    self.model = model
    self.scoring_metric = scoring_metric
    self.eval_metric = eval_metric
    self.n_jobs = n_jobs
    # Resolve the metric name into a scikit-learn scorer callable up front;
    # raises if the name is not a recognized sklearn metric.
    self.scorer_ = get_scorer(self.scoring_metric)
    self.best_model_ = None
    self.data = data
    if self.data is not None:
        # When a full DataFrame is supplied, both column specs are mandatory.
        if X_columns and y_column:
            self.X = data[X_columns].values
            self.y = data[y_column].values
        else:
            raise Exception('X and y columns must be specified if data set is provided.')
    self.cv = cv
    # Log the experiment configuration for reproducibility.
    self.print_message('Beginning experiment...')
    self.print_message('Package = {0}'.format(package))
    self.print_message('Scoring Metric = {0}'.format(scoring_metric))
    self.print_message('Evaluation Metric = {0}'.format(eval_metric))
    self.print_message('Parallel Jobs = {0}'.format(n_jobs))
    self.print_message('Model:')
    self.print_message(model, pprint=True)
    self.print_message('Parameters:')
    self.print_message(model.get_params(), pprint=True)
def train_model(self, X=None, y=None, validate=False, early_stopping=False,
                early_stopping_rounds=None, plot_eval_history=False, fig_size=16):
    """
    Trains a new model using the provided training data.

    Parameters
    ----------
    X : array-like, optional, default None
        Training input samples. Must be specified if no data was provided during
        initialization.

    y : array-like, optional, default None
        Target values. Must be specified if no data was provided during
        initialization.

    validate : boolean, optional, default False
        Evaluate model on a hold-out set during training.

    early_stopping : boolean, optional, default False
        Stop training the model when performance on a validation set begins to drop.
        Eval must be enabled.

    early_stopping_rounds : int, optional, default None
        Number of training iterations to allow before stopping training due to
        performance on a validation set. Eval and early_stopping must be enabled.

    plot_eval_history : boolean, optional, default False
        Plot model performance as a function of training time. Eval must be enabled.

    fig_size : int, optional, default 16
        Size of the evaluation history plot.
    """
    # Arguments override data captured at initialization, if any.
    if X is not None:
        self.X = X
    if y is not None:
        self.y = y

    self.print_message('Beginning model training...')
    self.print_message('X dimensions = {0}'.format(self.X.shape))
    self.print_message('y dimensions = {0}'.format(self.y.shape))

    v = 1 if self.verbose else 0
    t0 = time.time()
    if self.package not in ['sklearn', 'xgboost', 'keras', 'prophet']:
        raise Exception('Package not supported.')

    # Hold-out evaluation is only available for xgboost and keras models.
    if validate and self.package in ['xgboost', 'keras']:
        X_train, X_eval, y_train, y_eval = train_test_split(self.X, self.y, test_size=0.1)
        training_history = None
        min_eval_loss = None
        min_eval_epoch = None

        if early_stopping:
            if self.package == 'xgboost':
                self.model.fit(X_train, y_train, eval_set=[(X_eval, y_eval)],
                               eval_metric=self.eval_metric,
                               early_stopping_rounds=early_stopping_rounds,
                               verbose=self.verbose)
            elif self.package == 'keras':
                # Local import so keras is only required when actually used.
                from keras.callbacks import EarlyStopping
                callbacks = [
                    EarlyStopping(monitor='val_loss', patience=early_stopping_rounds)
                ]
                training_history = self.model.fit(X_train, y_train, verbose=v,
                                                  validation_data=(X_eval, y_eval),
                                                  callbacks=callbacks)
        else:
            if self.package == 'xgboost':
                self.model.fit(X_train, y_train, eval_set=[(X_eval, y_eval)],
                               eval_metric=self.eval_metric, verbose=self.verbose)
            elif self.package == 'keras':
                training_history = self.model.fit(X_train, y_train, verbose=v,
                                                  validation_data=(X_eval, y_eval))

        # Normalize the package-specific history into a plain list of losses.
        if self.package == 'xgboost':
            training_history = self.model.evals_result()['validation_0'][self.eval_metric]
            min_eval_loss = min(training_history)
            min_eval_epoch = training_history.index(min(training_history)) + 1
        elif self.package == 'keras':
            training_history = training_history.history['val_loss']
            min_eval_loss = min(training_history)
            min_eval_epoch = training_history.index(min(training_history)) + 1

        if plot_eval_history:
            df = pd.DataFrame(training_history, columns=['Eval Loss'])
            df.plot(figsize=(fig_size, fig_size * 3 / 4))

        t1 = time.time()
        self.print_message('Model training complete in {0:3f} s.'.format(t1 - t0))
        self.print_message('Training score = {0}'
                           .format(self.scorer_(self.model, X_train, y_train)))
        self.print_message('Min. evaluation score = {0}'.format(min_eval_loss))
        self.print_message('Min. evaluation epoch = {0}'.format(min_eval_epoch))
    elif validate:
        raise Exception('Package does not support evaluation during training.')
    else:
        if self.package == 'keras':
            self.model.set_params(verbose=v)
        self.model.fit(self.X, self.y)
        t1 = time.time()
        self.print_message('Model training complete in {0:3f} s.'.format(t1 - t0))
        self.print_message('Training score = {0}'
                           .format(self.scorer_(self.model, self.X, self.y)))
def cross_validate(self, X=None, y=None, cv=None):
    """
    Performs cross-validation to estimate the true performance of the model.

    Parameters
    ----------
    X : array-like, optional, default None
        Training input samples. Must be specified if no data was provided during
        initialization.

    y : array-like, optional, default None
        Target values. Must be specified if no data was provided during
        initialization.

    cv : object, optional, default None
        A cross-validation strategy. Accepts all options considered valid by
        scikit-learn. Must be specified if no cv was passed in during
        initialization.
    """
    # Arguments override data captured at initialization, if any.
    if X is not None:
        self.X = X
    if y is not None:
        self.y = y
    if cv is not None:
        self.cv = cv

    self.print_message('Beginning cross-validation...')
    self.print_message('X dimensions = {0}'.format(self.X.shape))
    self.print_message('y dimensions = {0}'.format(self.y.shape))
    self.print_message('Cross-validation strategy = {0}'.format(self.cv))

    t0 = time.time()
    if self.package not in ['sklearn', 'xgboost', 'keras', 'prophet']:
        raise Exception('Package not supported.')

    if self.package == 'keras':
        # Silence per-epoch keras output during the CV fits.
        self.model.set_params(verbose=0)

    results = cross_validate(self.model, self.X, self.y, scoring=self.scoring_metric,
                             cv=self.cv, n_jobs=self.n_jobs, verbose=0,
                             return_train_score=True)

    t1 = time.time()
    self.print_message('Cross-validation complete in {0:3f} s.'.format(t1 - t0))

    # Report the mean across folds for both train and test scores.
    train_score = np.mean(results['train_score'])
    test_score = np.mean(results['test_score'])

    self.print_message('Training score = {0}'.format(train_score))
    self.print_message('Cross-validation score = {0}'.format(test_score))
def learning_curve(self, X=None, y=None, cv=None, fig_size=16):
    """
    Plots a learning curve showing model performance against both training and
    validation data sets as a function of the number of training samples.

    Parameters
    ----------
    X : array-like, optional, default None
        Training input samples. Must be specified if no data was provided during
        initialization.

    y : array-like, optional, default None
        Target values. Must be specified if no data was provided during
        initialization.

    cv : object, optional, default None
        A cross-validation strategy. Accepts all options considered valid by
        scikit-learn. Must be specified if no cv was passed in during
        initialization.

    fig_size : int, optional, default 16
        Size of the plot.
    """
    # Arguments override data captured at initialization, if any.
    if X is not None:
        self.X = X
    if y is not None:
        self.y = y
    if cv is not None:
        self.cv = cv

    self.print_message('Plotting learning curve...')
    self.print_message('X dimensions = {0}'.format(self.X.shape))
    self.print_message('y dimensions = {0}'.format(self.y.shape))
    self.print_message('Cross-validation strategy = {0}'.format(self.cv))

    t0 = time.time()
    if self.package not in ['sklearn', 'xgboost', 'keras', 'prophet']:
        raise Exception('Package not supported.')

    if self.package == 'keras':
        # Silence per-epoch keras output during the repeated fits.
        self.model.set_params(verbose=0)

    values = learning_curve(self.model, self.X, self.y, cv=self.cv,
                            scoring=self.scoring_metric, n_jobs=self.n_jobs, verbose=0)
    train_sizes, train_scores, test_scores = values

    # Mean and spread across folds for each training-set size.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    fig, ax = plt.subplots(figsize=(fig_size, fig_size * 3 / 4))
    ax.set_title('Learning Curve')
    ax.set_xlabel('Training Examples')
    ax.set_ylabel('Score')
    # Shaded bands show +/- one standard deviation around each mean curve.
    ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
                    train_scores_mean + train_scores_std, alpha=0.1, color='b')
    ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
                    test_scores_mean + test_scores_std, alpha=0.1, color='r')
    ax.plot(train_sizes, train_scores_mean, 'o-', color='b', label='Training score')
    ax.plot(train_sizes, test_scores_mean, 'o-', color='r', label='Cross-validation score')
    ax.legend(loc='best')
    fig.tight_layout()

    t1 = time.time()
    self.print_message('Plot generation complete in {0:3f} s.'.format(t1 - t0))
def param_search(self, param_grid, X=None, y=None, cv=None, search_type='grid',
                 n_iter=100, save_results_path=None):
    """
    Conduct a search over some pre-defined set of hyper-parameter configurations
    to find the best-performing set of parameters.

    Parameters
    ----------
    param_grid : list, dict
        Parameter search space. See scikit-learn documentation for GridSearchCV
        and RandomizedSearchCV for acceptable formatting.
    X : array-like, optional, default None
        Training input samples. Must be specified if no data was provided during
        initialization.
    y : array-like, optional, default None
        Target values. Must be specified if no data was provided during
        initialization.
    cv : object, optional, default None
        A cross-validation strategy. Accepts all options considered valid by
        scikit-learn. Must be specified if no cv was passed in during
        initialization.
    search_type : {'grid', 'random'}, optional, default 'grid'
        Specifies use of grid search or random search. Requirements for
        param_grid are different depending on which method is used. See
        scikit-learn documentation for GridSearchCV and RandomizedSearchCV for
        details.
    n_iter : int, optional, default 100
        Number of search iterations to run. Only applies to random search.
    save_results_path : string, optional, default None
        Specifies a location to save the full results of the search in CSV
        format. File name should end in .csv.
    """
    # Any argument passed here overrides the value stored at initialization.
    if X is not None:
        self.X = X
    if y is not None:
        self.y = y
    if cv is not None:
        self.cv = cv
    self.print_message('Beginning hyper-parameter search...')
    self.print_message('X dimensions = {0}'.format(self.X.shape))
    self.print_message('y dimensions = {0}'.format(self.y.shape))
    self.print_message('Cross-validation strategy = {0}'.format(self.cv))
    t0 = time.time()
    if self.package not in ['sklearn', 'xgboost', 'keras', 'prophet']:
        raise Exception('Package not supported.')
    if self.package == 'keras':
        # Silence per-epoch Keras output during the (many) fits below.
        self.model.set_params(verbose=0)
    if search_type == 'grid':
        search = GridSearchCV(self.model, param_grid=param_grid, scoring=self.scoring_metric,
                              n_jobs=self.n_jobs, cv=self.cv, refit=self.scoring_metric,
                              verbose=0, return_train_score=True)
    elif search_type == 'random':
        # param_grid is passed positionally as RandomizedSearchCV's
        # `param_distributions` argument.
        search = RandomizedSearchCV(self.model, param_grid, n_iter=n_iter,
                                    scoring=self.scoring_metric, n_jobs=self.n_jobs,
                                    cv=self.cv, refit=self.scoring_metric, verbose=0,
                                    return_train_score=True)
    else:
        raise Exception('Search type not supported.')
    search.fit(self.X, self.y)
    t1 = time.time()
    # Fixed format spec: '{0:3f}' is width-3 (prints 6 decimals); '.3f' was intended.
    self.print_message('Hyper-parameter search complete in {0:.3f} s.'.format(t1 - t0))
    self.print_message('Best score found = {0}'.format(search.best_score_))
    self.print_message('Best parameters found:')
    self.print_message(search.best_params_, pprint=True)
    self.best_model_ = search.best_estimator_
    if save_results_path:
        results = pd.DataFrame(search.cv_results_)
        results = results.sort_values(by='mean_test_score', ascending=False)
        results.to_csv(save_results_path, index=False)
def load_model(self, filename):
    """
    Load a previously trained model from disk.

    Parameters
    ----------
    filename : string
        Location of the file to read. Pickled models for sklearn/xgboost/
        prophet; an HDF5 file for keras.
    """
    self.print_message('Loading model...')
    t0 = time.time()
    if self.package in ['sklearn', 'xgboost', 'prophet']:
        # Context manager guarantees the file is closed even if
        # pickle.load raises.
        with open(filename, 'rb') as model_file:
            self.model = pickle.load(model_file)
    elif self.package == 'keras':
        from keras.models import load_model
        # Replace the underlying Keras model inside the scikit-learn wrapper.
        self.model.model = load_model(filename)
    else:
        raise Exception('Package not supported.')
    t1 = time.time()
    # Fixed format spec: '.3f' (three decimals), not '3f' (field width 3).
    self.print_message('Model loaded in {0:.3f} s.'.format(t1 - t0))
def save_model(self, filename):
    """
    Persist a trained model to disk.

    Parameters
    ----------
    filename : string
        Location of the file to write. Scikit-learn, XGBoost, and Prophet use
        pickle to write to disk (use .pkl extension for clarity) while Keras has
        a built-in save function that uses the HDF5 file format, so Keras models
        must have a .h5 extension.
    """
    self.print_message('Saving model...')
    t0 = time.time()
    if self.package in ['sklearn', 'xgboost', 'prophet']:
        # Context manager guarantees the handle is closed even if
        # pickle.dump raises.
        with open(filename, 'wb') as model_file:
            pickle.dump(self.model, model_file)
    elif self.package == 'keras':
        # The wrapped Keras model only exists after fit() has been called.
        if hasattr(self.model, 'model'):
            self.model.model.save(filename)
        else:
            raise Exception('Keras model must be fit before saving.')
    else:
        raise Exception('Package not supported.')
    t1 = time.time()
    # Fixed format spec: '.3f' (three decimals), not '3f' (field width 3).
    self.print_message('Model saved in {0:.3f} s.'.format(t1 - t0))
| apache-2.0 |
ChrisLR/Python-Roguelike-Template | scenes/game/windows/game_window.py | 1 | 4901 | import tdl
from clubsandwich.geom import Point, Size
from clubsandwich.ui import RectView
from ui.camera import Camera
from util.cursor import Cursor
class GameWindow(RectView):
    """Main play-field view: renders the level map, items, monsters and the
    player through a Camera that maps level coordinates to screen coordinates.
    """

    def __init__(self, game_context, **kwargs):
        super().__init__(fill=True, **kwargs)
        self.game_context = game_context
        player = self.game_context.player
        # Camera starts on a copy of the player's location and tracks the
        # player as its focus object. Screen size is fixed at 120x30 cells.
        self.camera = Camera(location=player.location.copy(), screen_size=Size(120, 30))
        self.camera.character_focus = player

    def draw(self, ctx):
        """Recompute the player's FOV and redraw the whole window into ctx."""
        player = self.game_context.player
        current_level = player.location.level
        self.camera.focus_on_game_object()
        self.set_tiles_background_color(ctx)
        player_x = player.location.local_x
        player_y = player.location.local_y

        def is_transparent_callback(x, y):
            # Guard against negative/zero coordinates before delegating to
            # the maze-based transparency check.
            if x <= 0 or y <= 0:
                return False
            return self.is_transparent(current_level, x, y)

        player.fov = tdl.map.quickFOV(player_x, player_y, is_transparent_callback, 'basic')
        # NB: Order in which things are render is important
        # (later layers overwrite earlier ones: map < items < monsters < player)
        self.draw_map(player.fov, ctx)
        self.draw_items(player, ctx)
        self.draw_characters(player, ctx)
        self.draw_player(player, ctx)

    def draw_map(self, viewer_fov, ctx):
        """Draw every maze tile inside viewer_fov and mark it as explored."""
        current_level = self.camera.location.level
        for x, y in viewer_fov:
            # FOV may include coordinates outside the maze bounds; skip them.
            if not x >= len(current_level.maze) and not y >= len(current_level.maze[x]):
                relative_point = self.camera.transform(Point(x, y))
                # transform() returns None for points outside the camera view.
                if relative_point is not None:
                    ctx.printf(
                        relative_point,
                        current_level.maze[x][y].display.get_draw_info()
                    )
                    current_level.maze[x][y].is_explored = True

    def draw_items(self, player, ctx):
        """Draw spawned items that are inside the player's FOV."""
        current_level = self.camera.location.level
        for item in current_level.spawned_items:
            x, y = item.location.get_local_coords()
            if (x, y) in player.fov:
                relative_point = self.camera.transform(Point(x, y))
                if relative_point is not None:
                    ctx.printf(
                        relative_point,
                        item.display.get_draw_info()
                    )

    def draw_characters(self, player, ctx):
        """Draw spawned monsters that are inside the player's FOV."""
        # draw monsters
        current_level = self.camera.location.level
        for monster in current_level.spawned_monsters:
            x, y = monster.location.get_local_coords()
            if (x, y) in player.fov:
                relative_point = self.camera.transform(Point(x, y))
                if relative_point is not None:
                    ctx.printf(
                        relative_point,
                        monster.display.get_draw_info()
                    )

    def draw_player(self, player, ctx):
        """Draw the player, plus the cursor when the camera is focused on one."""
        relative_point = self.camera.transform(Point(*player.location.get_local_coords()))
        if relative_point is not None:
            ctx.printf(relative_point, player.display.get_draw_info())
        # In targeting/look mode the camera focus is a Cursor, drawn on top.
        if isinstance(self.camera.character_focus, Cursor):
            cursor = self.camera.character_focus
            relative_point = self.camera.transform(Point(*cursor.location.get_local_coords()))
            if relative_point is not None:
                ctx.printf(relative_point, cursor.display.get_draw_info())

    def set_tiles_background_color(self, ctx):
        """Paint previously explored walls/floors in gray as a memory layer."""
        # TODO Instead of using a different color, we should darken whatever color it is.
        # TODO Allowing us to use many colors as walls and tiles to create levels with different looks.
        current_level = self.camera.location.level
        for y in range(current_level.height):
            for x in range(current_level.width):
                relative_point = self.camera.transform(Point(x, y))
                if relative_point is not None:
                    tile = current_level.maze[x][y]
                    wall = tile.block_sight
                    ground = tile.is_ground
                    if tile.is_explored:
                        if wall:
                            ctx.printf(relative_point, '[color=gray]#')
                        elif ground:
                            ctx.printf(relative_point, '[color=gray].')

    @staticmethod
    def is_transparent(current_level, x, y):
        """
        Used by map.quickFOV to determine which tile fall within the players "field of view"
        """
        try:
            # Pass on IndexErrors raised for when a player gets near the edge of the screen
            # and tile within the field of view fall out side the bounds of the maze.
            tile = current_level.maze[x][y]
            # A tile blocks sight only when it both blocks sight AND movement.
            if tile.block_sight and tile.is_blocked:
                return False
            else:
                return True
        except IndexError:
            return False
| mit |
oxtopus/nupic | tests/swarming/nupic/swarming/experiments/field_threshold_temporal/permutations.py | 4 | 5557 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'attendance'

# Search space: each PermuteEncoder/PermuteInt/PermuteChoices entry describes a
# dimension the swarm may vary; fixed kwargs (w, maxval, ...) are held constant.
permutations = {
    'modelParams': {
        'sensorParams': {
            'encoders': {
                'attendance': PermuteEncoder(fieldName='attendance', encoderClass='AdaptiveScalarEncoder', maxval=36067, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
                'visitor_winloss': PermuteEncoder(fieldName='visitor_winloss', encoderClass='AdaptiveScalarEncoder', maxval=0.786, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
                'timestamp_dayOfWeek': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.dayOfWeek', radius=PermuteChoices([1, 3]), w=7),
                'precip': PermuteEncoder(fieldName='precip', encoderClass='SDRCategoryEncoder', w=7, n=100),
                'timestamp_timeOfDay': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.timeOfDay', radius=PermuteChoices([1, 8]), w=7),
                'daynight': PermuteEncoder(fieldName='daynight', encoderClass='SDRCategoryEncoder', w=7, n=100),
                'home_winloss': PermuteEncoder(fieldName='home_winloss', encoderClass='AdaptiveScalarEncoder', maxval=0.7, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
            },
        },
        # Temporal pooler thresholds are searched over small integer ranges.
        'tpParams': {
            'minThreshold': PermuteInt(9, 12),
            'activationThreshold': PermuteInt(12, 16),
        },
    }
}

# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
    '.*attendance.*',
]

# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'nonprediction:aae:window=1000:field=attendance')
minimize = 'prediction:aae:window=1000:field=attendance'
#############################################################################
def dummyModelParams(perm):
    """ This function can be used for Hypersearch algorithm development. When
    present, we don't actually run the CLA model in the OPF, but instead run
    a dummy model. This function returns the dummy model params that will be
    used. See the OPFDummyModelRunner class source code (in
    nupic.swarming.ModelRunner) for a description of the schema for
    the dummy model params.
    """
    # Score contributions, ordered from most to least important field
    # (A..F). The A,E combo contributes the most, then A,D, then A,C; the
    # F field (daynight) actively hurts the score when included.
    contributions = (
        ('visitor_winloss', -25),      # A
        ('home_winloss', -20),         # B
        ('timestamp_timeOfDay', -15),  # C
        ('timestamp_dayOfWeek', -10),  # D
        ('precip', -5),                # E
        ('daynight', +10),             # F
    )

    encoders = perm['modelParams']['sensorParams']['encoders']
    errScore = 100
    for fieldName, delta in contributions:
        # A field "participates" in this permutation when its encoder entry
        # is not None.
        if encoders[fieldName] is not None:
            errScore += delta

    return dict(
        metricValue=errScore,
        metricFunctions=None,
    )
#############################################################################
def permutationFilter(perm):
    """Decide whether a candidate permutation is allowed.

    RunPermutations calls this once for every possible combination of values
    drawn from the ``permutations`` dict. Return True to keep the combination
    in the search, False to prune it.

    Parameters:
    ---------------------------------------------------------
    perm: dict of one possible combination of name:value
          pairs chosen from permutations.
    """
    # No pruning by default: every permutation is considered valid. To filter,
    # inspect `perm` and return False for unwanted combinations, e.g.:
    #   if perm['__consumption_encoder']['maxval'] > 300:
    #       return False
    return True
| gpl-3.0 |
takahashiminoru/ryu | ryu/services/protocols/vrrp/sample_manager.py | 56 | 3663 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
sample router manager.
(un-)instantiate routers
Usage example:
PYTHONPATH=. ./bin/ryu-manager --verbose \
ryu.services.protocols.vrrp.manager \
ryu.services.protocols.vrrp.dumper \
ryu.services.protocols.vrrp.sample_manager
"""
from ryu.base import app_manager
from ryu.controller import handler
from ryu.services.protocols.vrrp import event as vrrp_event
from ryu.services.protocols.vrrp import sample_router
class RouterManager(app_manager.RyuApp):
    """Instantiate/uninstantiate sample VRRP routers in response to
    EventVRRPStateChanged events, and forward subsequent events to the
    per-instance router app.
    """

    # Maps interface type -> {ip version -> router app class}.
    _ROUTER_CLASSES = {
        vrrp_event.VRRPInterfaceNetworkDevice: {
            4: sample_router.RouterIPV4Linux,
            6: sample_router.RouterIPV6Linux,
        },
        vrrp_event.VRRPInterfaceOpenFlow: {
            4: sample_router.RouterIPV4OpenFlow,
            6: sample_router.RouterIPV6OpenFlow,
        },
    }

    def __init__(self, *args, **kwargs):
        super(RouterManager, self).__init__(*args, **kwargs)
        # Saved so they can be forwarded verbatim to each instantiated router.
        self._args = args
        self._kwargs = kwargs
        self.routers = {}   # instance name -> router name

    def _router_factory(self, instance_name, monitor_name, interface, config):
        """Pick the router class for (interface type, IP version) and
        instantiate it as a Ryu app; raises ValueError for unknown interfaces.
        """
        cls = None
        for interface_cls, router_clses in self._ROUTER_CLASSES.items():
            if isinstance(interface, interface_cls):
                if config.is_ipv6:
                    cls = router_clses[6]
                else:
                    cls = router_clses[4]
                break
        self.logger.debug('interface %s %s', type(interface), interface)
        self.logger.debug('cls %s', cls)
        if cls is None:
            raise ValueError('Unknown interface type %s %s' % (type(interface),
                                                               interface))
        kwargs = self._kwargs.copy()
        kwargs.update({
            'name': instance_name,
            'monitor_name': monitor_name,
            'config': config,
            'interface': interface,
        })
        app_mgr = app_manager.AppManager.get_instance()
        return app_mgr.instantiate(cls, *self._args, **kwargs)

    @handler.set_ev_cls(vrrp_event.EventVRRPStateChanged)
    def vrrp_state_changed_handler(self, ev):
        # A transition to INITIALIZE means either startup (no old state) or
        # shutdown (old state present); anything else is relayed to the router.
        if ev.new_state == vrrp_event.VRRP_STATE_INITIALIZE:
            if ev.old_state:
                self._shutdown(ev)
            else:
                self._initialize(ev)
            return
        router_name = self.routers.get(ev.instance_name)
        self.send_event(router_name, ev)

    def _initialize(self, ev):
        """Create, register and start the router app for a new VRRP instance."""
        router = self._router_factory(ev.instance_name, ev.monitor_name,
                                      ev.interface, ev.config)
        self.routers[ev.instance_name] = router.name
        self.send_event(router.name, ev)
        router.start()

    def _shutdown(self, ev):
        """Forward the final event to the router, then tear the app down."""
        router_name = self.routers.pop(ev.instance_name)
        self.send_event(router_name, ev)
        app_mgr = app_manager.AppManager.get_instance()
        app_mgr.uninstantiate(router_name)
| apache-2.0 |
flacjacket/sympy | sympy/galgebra/examples/coords.py | 3 | 1042 | #!/usr/bin/python
#EandM.py
import sys
import sympy.galgebra.GA as GA
import sympy.galgebra.latex_ex as tex
import sympy,numpy,time
if __name__ == '__main__':
    # Euclidean 3D metric (identity matrix) for the geometric algebra basis.
    metric = '1 0 0,'+\
             '0 1 0,'+\
             '0 0 1'
    gamma_x,gamma_y,gamma_z = GA.MV.setup('gamma_x gamma_y gamma_z',metric,True)
    tex.Format()
    coords = r,theta,phi = sympy.symbols('r theta phi')
    # Position vector in spherical coordinates expressed in the Cartesian basis.
    x = r*(sympy.cos(theta)*gamma_z+sympy.sin(theta)*\
        (sympy.cos(phi)*gamma_x+sympy.sin(phi)*gamma_y))
    x.set_name('x')
    # Rebase the algebra onto the spherical coordinate frame 'e'.
    GA.MV.rebase(x,coords,'e',True)
    # Gradient of a scalar field psi (NOTE: Python 2 print statements below).
    psi = GA.MV('psi','scalar',fct=True)
    dpsi = psi.grad()
    print 'Gradient of Scalar Function $\\psi$'
    print '\\nabla\\psi =',dpsi
    # Divergence and curl of a vector field A.
    A = GA.MV('A','vector',fct=True)
    print 'Div and Curl of Vector Function $A$'
    print A
    gradA = A.grad()
    I = GA.MV(GA.ONE,'pseudo')
    divA = A.grad_int()
    # Curl as -I times the exterior (wedge) part of the gradient.
    curlA = -I*A.grad_ext()
    print '\\nabla \\cdot A =',divA
    tex.MV_format(3)
    print '-I\\lp\\nabla \\W A\\rp =',curlA
    # Render the LaTeX output and open it with xdvi.
    tex.xdvi(filename='coords.tex')
| bsd-3-clause |
yakky/django | tests/gis_tests/gdal_tests/test_driver.py | 335 | 1253 | import unittest
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, GDALException
# Driver names GDAL/OGR is expected to recognize, split by data-source kind.
valid_drivers = (
    # vector
    'ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN', 'Memory', 'CSV',
    'GML', 'KML',
    # raster
    'GTiff', 'JPEG', 'MEM', 'PNG',
)

# Names that should NOT resolve to any driver.
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp', 'ESRI rast')

# Case-insensitive shorthand -> canonical driver name, as accepted by Driver.
aliases = {
    'eSrI': 'ESRI Shapefile',
    'TigER/linE': 'TIGER',
    'SHAPE': 'ESRI Shapefile',
    'sHp': 'ESRI Shapefile',
    'tiFf': 'GTiff',
    'tIf': 'GTiff',
    'jPEg': 'JPEG',
    'jpG': 'JPEG',
}
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class DriverTest(unittest.TestCase):
    """Exercise Driver construction against known-good names, known-bad
    names, and case-insensitive aliases."""

    def test01_valid_driver(self):
        "Testing valid GDAL/OGR Data Source Drivers."
        for name in valid_drivers:
            driver = Driver(name)
            self.assertEqual(name, str(driver))

    def test02_invalid_driver(self):
        "Testing invalid GDAL/OGR Data Source Drivers."
        for name in invalid_drivers:
            with self.assertRaises(GDALException):
                Driver(name)

    def test03_aliases(self):
        "Testing driver aliases."
        for alias, full_name in aliases.items():
            self.assertEqual(full_name, str(Driver(alias)))
| bsd-3-clause |
jimporter/bfg9000 | bfg9000/builtins/compile.py | 1 | 17364 | from collections import defaultdict
from . import builtin
from .. import options as opts
from .path import buildpath, relname, within_directory
from .file_types import FileList, make_file_list, static_file
from ..backends.compdb import writer as compdb
from ..backends.make import writer as make
from ..backends.ninja import writer as ninja
from ..build_inputs import build_input, Edge
from ..file_types import *
from ..iterutils import first, flatten, iterate, unlistify
from ..objutils import convert_each, convert_one
from ..path import Path
from ..shell import posix as pshell
# Register the 'compile_options' build input: a defaultdict mapping each
# language to a (lazily-created) list of global compile options.
build_input('compile_options')(lambda build_inputs, env: defaultdict(list))
class BaseCompile(Edge):
    """Common base for compilation build-graph edges. Subclasses must set
    ``self.file``, ``self.compiler`` and ``self.user_options`` before calling
    ``__init__`` here.
    """
    desc_verb = 'compile'

    def __init__(self, context, name, internal_options, directory=None,
                 extra_deps=None, description=None):
        build = context.build
        # No explicit name: let the compiler derive one from the input file.
        if name is None:
            name = self.compiler.default_name(self.file, self)
        if directory:
            name = within_directory(Path(name), directory).suffix
        else:
            name = relname(context, name)

        # The compiler may contribute extra options based on the output name.
        extra_options = self.compiler.pre_output(context, name, self)
        self._internal_options = opts.option_list(
            internal_options, extra_options
        )

        output = self.compiler.output_file(name, self)
        primary = first(output)
        options = self.options
        compiler = self.compiler
        public_output = compiler.post_output(context, options, output, self)
        primary.post_install = compiler.post_install(options, output, self)

        super().__init__(build, output, public_output, extra_deps, description)

    @property
    def options(self):
        # Internal (derived) options first, then user-supplied ones.
        return self._internal_options + self.user_options

    def flags(self, global_options=None):
        """Return the command-line flags for this edge's option set."""
        return self.compiler.flags(self.options, global_options,
                                   self.raw_output)

    @staticmethod
    def convert_args(context, kwargs):
        # Normalize the `directory` kwarg into a build path.
        convert_one(kwargs, 'directory', lambda x: buildpath(context, x, True))
        return kwargs
class Compile(BaseCompile):
    """Build edge for compiling a source or header file to an object/PCH,
    handling includes, libraries, packages and precompiled headers.
    """

    def __init__(self, context, name, *, includes, include_deps, pch, libs,
                 packages, options, lang=None, directory=None, extra_deps=None,
                 description=None):
        self.includes = includes
        self.include_deps = include_deps
        self.packages = packages
        self.user_options = options

        internal_options = opts.option_list(opts.include_dir(i)
                                            for i in self.includes)

        # Don't bother handling forward_opts from libs now, since the only
        # languages that need libs during compilation don't support static
        # linking anyway.
        if self.compiler.needs_libs:
            self.libs = libs
            internal_options.extend(opts.lib(i) for i in self.libs)
        if self.compiler.needs_package_options:
            internal_options.collect(i.compile_options(self.compiler)
                                     for i in self.packages)

        self.pch = pch
        if self.pch:
            if not self.compiler.accepts_pch:
                raise TypeError('pch not supported for this compiler')
            internal_options.append(opts.pch(self.pch))

        super().__init__(context, name, internal_options, directory,
                         extra_deps, description)

    @staticmethod
    def convert_args(context, lang, kwargs):
        """Normalize user-facing kwargs (includes/libs/packages/options/pch)
        into the file/option objects the constructor expects."""
        def pch(file, **kwargs):
            # Build a PCH from the header itself (header doubles as source).
            return context['precompiled_header'](file, file, **kwargs)

        includes = kwargs.get('includes')
        # Only includes that are code files or generated (have a creator)
        # count as real dependencies of the compilation.
        kwargs['include_deps'] = [
            i for i in iterate(includes)
            if isinstance(i, CodeFile) or getattr(i, 'creator', None)
        ]
        convert_each(kwargs, 'includes', context['header_directory'])
        convert_each(kwargs, 'libs', context['library'], lang=lang)
        convert_each(kwargs, 'packages', context['package'], lang=lang)
        kwargs['options'] = pshell.listify(kwargs.get('options'),
                                           type=opts.option_list)
        # The PCH inherits this compilation's includes/packages/options so it
        # is built consistently with the files that use it.
        convert_one(kwargs, 'pch', pch, includes=includes,
                    packages=kwargs['packages'], options=kwargs['options'],
                    lang=lang)

        kwargs = BaseCompile.convert_args(context, kwargs)
        return kwargs

    def add_extra_options(self, options):
        self._internal_options.extend(options)
        # PCH files should always be built with the same options as files using
        # them, so forward the extra options onto the PCH if it exists.
        if self.pch and hasattr(self.pch.creator, 'add_extra_options'):
            self.pch.creator.add_extra_options(options)
class CompileSource(Compile):
    """Compile a single source file into an object file."""

    def __init__(self, context, name, file, *, lang=None, **kwargs):
        # Explicit lang overrides the language inferred from the file.
        builder_lang = lang or getattr(file, 'lang', None)
        if builder_lang is None:
            raise ValueError('unable to determine language for file {!r}'
                             .format(file.path))
        self.file = file
        self.compiler = context.env.builder(builder_lang).compiler
        super().__init__(context, name, **kwargs)

    @classmethod
    def convert_args(cls, context, file, kwargs):
        """Resolve `file` to a source file, transpiling first via
        `generated_source` when its language has no direct compiler."""
        lang = kwargs.get('lang')
        if lang is None:
            # Check if the input file needs to be forwarded on to
            # `generated_source`.
            guessed_file = context['auto_file'](file)
            if getattr(guessed_file, 'lang', None) is None:
                raise ValueError('unable to determine language for file {!r}'
                                 .format(guessed_file.path))
            builder = context.env.builder(guessed_file.lang)
            if hasattr(builder, 'compiler'):
                # This builder supports compilation; no need to forward to
                # `generated_source`.
                file = context['source_file'](file)
            else:
                # Pop off the `directory` argument and pass it to
                # `generated_source`. This puts the intermediate source file in
                # the `directory`, and then our final object file will
                # automatically go there as well without needing the
                # `directory` itself.
                file = context['generated_source'](
                    guessed_file, directory=kwargs.pop('directory', None)
                )
        else:
            file = context['source_file'](file, lang=lang)
        return file, super().convert_args(context, lang or file.lang, kwargs)
class CompileHeader(Compile):
    """Compile a header file into a precompiled header, optionally driven by
    an accompanying source file (``source``)."""
    desc_verb = 'compile-header'

    def __init__(self, context, name, file, *, source, lang=None, **kwargs):
        builder_lang = lang or getattr(file, 'lang', None)
        if builder_lang is None:
            raise ValueError('unable to determine language for file {!r}'
                             .format(file.path))
        self.file = file
        # Some compilers build the PCH by compiling a source that includes it.
        self.pch_source = source
        self.compiler = context.env.builder(builder_lang).pch_compiler
        super().__init__(context, name, **kwargs)

    @classmethod
    def convert_args(cls, context, file, kwargs):
        lang = kwargs.get('lang')
        file = context['header_file'](file, lang=lang)
        file_lang = lang or file.lang
        convert_one(kwargs, 'source', context['source_file'], lang=file_lang)
        return file, super().convert_args(context, file_lang, kwargs)
class GenerateSource(BaseCompile):
    """Transpile a file into a source file using the language's transpiler
    (e.g. for languages that compile to another source language)."""
    desc_verb = 'generate'

    def __init__(self, context, name, file, *, options, lang=None,
                 directory=None, extra_deps=None, description=None):
        builder_lang = lang or getattr(file, 'lang', None)
        if builder_lang is None:
            raise ValueError('unable to determine language for file {!r}'
                             .format(file.path))
        self.file = file
        self.user_options = options
        self.compiler = context.env.builder(builder_lang).transpiler
        # No internal options for generation (third argument is None).
        super().__init__(context, name, None, directory, extra_deps,
                         description)

    @classmethod
    def convert_args(cls, context, file, kwargs):
        lang = kwargs.get('lang')
        file = context['auto_file'](file, lang=lang)
        kwargs['options'] = pshell.listify(kwargs.get('options'),
                                           type=opts.option_list)
        return file, super().convert_args(context, kwargs)
@builtin.function()
@builtin.type(ObjectFile, extra_in_type=type(None))
def object_file(context, name=None, file=None, **kwargs):
    """Builtin: reference a prebuilt object file (when `file` is None) or
    compile `file` into an object file."""
    if file is None:
        if name is None:
            raise TypeError('expected name')
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        return static_file(context, ObjectFile, name, dist, params, kwargs)
    file, kwargs = CompileSource.convert_args(context, file, kwargs)
    return CompileSource(context, name, file, **kwargs).public_output
@builtin.function()
@builtin.type(FileList, in_type=object)
def object_files(context, files, **kwargs):
    """Builtin: compile each file in `files` to an object file, returning a
    FileList of the results."""
    @builtin.type(ObjectFile, extra_in_type=CodeFile)
    def make_object_file(file, **kwargs):
        file, kwargs = CompileSource.convert_args(context, file, kwargs)
        return CompileSource(context, None, file, **kwargs).public_output
    return make_file_list(context, make_object_file, files, **kwargs)
@builtin.function()
@builtin.type(PrecompiledHeader, extra_in_type=type(None))
def precompiled_header(context, name=None, file=None, **kwargs):
    """Builtin: reference a prebuilt PCH (when `file` is None) or compile
    `file` into a precompiled header."""
    if file is None:
        if name is None:
            raise TypeError('expected name')
        dist = kwargs.pop('dist', True)
        params = [('lang', context.build['project']['lang'])]
        return static_file(context, PrecompiledHeader, name, dist, params,
                           kwargs)
    file, kwargs = CompileHeader.convert_args(context, file, kwargs)
    return CompileHeader(context, name, file, **kwargs).public_output
@builtin.function()
@builtin.type(CodeFile, short_circuit=False, first_optional=True)
def generated_source(context, name, file, **kwargs):
    """Builtin: transpile `file` into a generated source file."""
    file, kwargs = GenerateSource.convert_args(context, file, kwargs)
    return GenerateSource(context, name, file, **kwargs).public_output
@builtin.function()
@builtin.type(FileList, in_type=object)
def generated_sources(context, files, **kwargs):
    """Builtin: apply `generated_source` to each file, returning a FileList."""
    return make_file_list(context, context['generated_source'], files,
                          **kwargs)
@builtin.function()
def global_options(context, options, lang):
    """Builtin: append compile options to the global option list for each
    of the given language(s)."""
    per_lang = context.build['compile_options']
    for language in iterate(lang):
        per_lang[language].extend(
            pshell.listify(options, type=opts.option_list)
        )
def _get_flags(backend, rule, build_inputs, buildfile):
    """Compute the backend variables and command kwargs holding the compile
    flags for `rule`. Returns (variables, cmd_kwargs)."""
    variables = {}
    cmd_kwargs = {}

    compiler = rule.compiler
    # Compilers without a flags_var (if any) contribute no flag variables.
    if hasattr(compiler, 'flags_var'):
        gopts = build_inputs['compile_options'][compiler.lang]
        global_cflags, cflags = backend.flags_vars(
            compiler.flags_var,
            compiler.global_flags + compiler.flags(gopts, mode='global'),
            buildfile
        )
        cmd_kwargs['flags'] = cflags
        flags = rule.flags(gopts)
        # Only override the per-rule flags variable when this rule actually
        # adds flags beyond the global defaults.
        if flags:
            variables[cflags] = [global_cflags] + flags

    return variables, cmd_kwargs
@make.rule_handler(CompileSource, CompileHeader, GenerateSource)
def make_compile(rule, build_inputs, buildfile, env):
    """Emit the Make rules for a compile/generate edge."""
    compiler = rule.compiler
    variables, cmd_kwargs = _get_flags(make, rule, build_inputs, buildfile)

    # Map outputs to Make automatic/positional variables: '$@' when the
    # compiler emits all outputs at once, otherwise numbered parameters.
    output_params = []
    if compiler.num_outputs == 'all':
        output_vars = make.qvar('@')
    else:
        output_vars = []
        for i in range(compiler.num_outputs):
            v = make.var(str(i + 1))
            output_vars.append(v)
            output_params.append(rule.output[i])

    # Define the shared recipe for this compiler once per buildfile.
    recipename = make.var('RULE_{}'.format(compiler.rule_name.upper()))
    if not buildfile.has_variable(recipename):
        recipe_extra = []

        # Only GCC-style depfiles are supported by Make.
        if compiler.deps_flavor == 'gcc':
            depfixer = env.tool('depfixer')
            cmd_kwargs['deps'] = deps = first(output_vars) + '.d'
            recipe_extra = [make.Silent(depfixer(deps))]

        buildfile.define(recipename, [compiler(
            make.qvar('<'), output_vars, **cmd_kwargs
        )] + recipe_extra)

    # Collect prerequisites: PCH source first (it becomes '$<' input below),
    # then the main file, PCH, header deps, libs and package deps.
    deps = []
    if getattr(rule, 'pch_source', None):
        deps.append(rule.pch_source)
    deps.append(rule.file)
    if getattr(rule, 'pch', None):
        deps.append(rule.pch)
    deps.extend(getattr(rule, 'include_deps', []))
    if getattr(rule, 'libs', None):
        deps.extend(rule.libs)
    deps.extend(flatten(i.deps for i in getattr(rule, 'packages', [])))

    # Include the generated depfile (if any) so header changes retrigger.
    if compiler.deps_flavor == 'gcc':
        depfile = rule.output[0].path.addext('.d')
        build_inputs.add_target(File(depfile))
        buildfile.include(depfile, optional=True)

    make.multitarget_rule(
        build_inputs, buildfile,
        targets=rule.output,
        deps=deps + rule.extra_deps,
        order_only=make.directory_deps(rule.output),
        recipe=make.Call(recipename, *output_params),
        variables=variables
    )
@ninja.rule_handler(CompileSource, CompileHeader, GenerateSource)
def ninja_compile(rule, build_inputs, buildfile, env):
    """Emit the Ninja build statements for a compile/generate edge."""
    compiler = rule.compiler
    variables, cmd_kwargs = _get_flags(ninja, rule, build_inputs, buildfile)

    if rule.description:
        variables['description'] = rule.description

    # Choose how the command refers to its outputs: '$out' when the compiler
    # emits everything at once, otherwise explicit per-output variables.
    if compiler.num_outputs == 'all':
        output_vars = ninja.var('out')
    elif compiler.num_outputs == 1:
        output_vars = ninja.var('output')
        variables[output_vars] = rule.output[0]
    else:
        output_vars = []
        for i in range(compiler.num_outputs):
            v = ninja.var('output{}'.format(i + 1))
            output_vars.append(v)
            variables[v] = rule.output[i]

    # Define the shared rule for this compiler once per buildfile.
    if not buildfile.has_rule(compiler.rule_name):
        depfile = None
        deps = None

        if compiler.deps_flavor == 'gcc':
            deps = 'gcc'
            cmd_kwargs['deps'] = depfile = ninja.var('out') + '.d'
        elif compiler.deps_flavor == 'msvc':
            deps = 'msvc'
            cmd_kwargs['deps'] = True

        desc = rule.desc_verb + ' => ' + first(output_vars)
        buildfile.rule(name=compiler.rule_name, command=compiler(
            ninja.var('in'), output_vars, **cmd_kwargs
        ), depfile=depfile, deps=deps, description=desc)

    inputs = [rule.file]
    implicit_deps = []
    if getattr(rule, 'pch', None):
        implicit_deps.append(rule.pch)
    if getattr(rule, 'pch_source', None):
        # For PCH builds, the pch_source is the real command input and the
        # header becomes an implicit dependency.
        inputs = [rule.pch_source]
        implicit_deps.append(rule.file)
    implicit_deps.extend(getattr(rule, 'include_deps', []))
    if getattr(rule, 'libs', None):
        implicit_deps.extend(rule.libs)
    implicit_deps.extend(flatten(
        i.deps for i in getattr(rule, 'packages', [])
    ))

    # Ninja doesn't support multiple outputs and deps-parsing at the same time,
    # so just use the first output and set up an alias if necessary. Aliases
    # aren't perfect, since the build can get out of sync if you delete the
    # "alias" file, but it's close enough.
    if compiler.deps_flavor in ('gcc', 'msvc') and len(rule.output) > 1:
        output = rule.output[0]
        buildfile.build(
            output=rule.output[1:],
            rule='phony',
            inputs=rule.output[0]
        )
    else:
        output = rule.output

    buildfile.build(
        output=output,
        rule=compiler.rule_name,
        inputs=inputs,
        implicit=implicit_deps + rule.extra_deps,
        variables=variables
    )
@compdb.rule_handler(CompileSource, CompileHeader, GenerateSource)
def compdb_compile(rule, build_inputs, buildfile, env):
    """Emit a compilation-database entry for a compile/generate edge."""
    compiler = rule.compiler
    cmd_kwargs = {}
    if hasattr(compiler, 'flags_var'):
        gopts = build_inputs['compile_options'][compiler.lang]
        # Global flags plus this rule's own flags, fully expanded (the compdb
        # has no variables to defer to).
        cmd_kwargs['flags'] = (compiler.global_flags +
                               compiler.flags(gopts, mode='global') +
                               rule.flags(gopts))
    if compiler.deps_flavor == 'gcc':
        # Simplified: the original re-tested deps_flavor == 'gcc' inside this
        # branch, which was always true.
        cmd_kwargs['deps'] = first(rule.output).path.addext('.d')
    elif compiler.deps_flavor == 'msvc':
        cmd_kwargs['deps'] = True

    output = unlistify(rule.output if compiler.num_outputs == 'all'
                       else rule.output[0:compiler.num_outputs])
    buildfile.append(
        arguments=compiler(rule.file, output, **cmd_kwargs),
        file=rule.file, output=first(rule.public_output)
    )
# Register MSBuild handlers only when the msbuild backend is importable;
# on platforms without it, compilation rules simply have no msbuild support.
try:
    from ..backends.msbuild import writer as msbuild

    @msbuild.rule_handler(CompileSource, CompileHeader)
    def msbuild_compile(rule, build_inputs, solution, env):
        # MSBuild does compilation and linking in one unit; see link.py.
        pass

    @msbuild.rule_handler(GenerateSource)
    def msbuild_generate_source(rule, build_inputs, solution, env):
        # Explicitly reject generated sources rather than silently ignoring them.
        raise ValueError('msbuild backend does not currently support ' +
                         "'generated_source'")  # pragma: no cover
except ImportError:  # pragma: no cover
    pass
| bsd-3-clause |
guorendong/iridium-browser-ubuntu | third_party/WebKit/Source/devtools/scripts/devtools_file_hashes.py | 63 | 2869 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import os.path
try:
import json
except ImportError:
import simplejson as json
def save_hashes(hashes_file_path, hashes):
    # Persist the {file name: md5 hex digest} mapping as pretty-printed JSON.
    # Failures are reported and re-raised so the caller sees the error.
    try:
        with open(hashes_file_path, "wt") as hashes_file:
            json.dump(hashes, hashes_file, indent=4, separators=(",", ": "))
    except:
        print "ERROR: Failed to write %s" % hashes_file_path
        raise
def load_hashes(hashes_file_path):
    """Return the {file name: md5 hex digest} mapping stored at the path.

    Returns an empty dict when the file is missing, unreadable, or contains
    invalid JSON, so callers can treat "no hash file yet" as "no hashes".
    """
    try:
        with open(hashes_file_path, "r") as hashes_file:
            hashes = json.load(hashes_file)
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
    # programming errors are not silently swallowed; only I/O problems and
    # malformed JSON (ValueError) mean "no usable hash file".
    except (IOError, OSError, ValueError):
        return {}
    return hashes
def calculate_file_hash(file_path):
    """Return the hex md5 digest of the file's contents."""
    # Read in binary mode so the digest is computed over the exact bytes on
    # disk (no platform newline translation) and so hashlib receives bytes
    # rather than text on Python 3.
    with open(file_path, "rb") as file:
        data = file.read()
    md5_hash = hashlib.md5(data).hexdigest()
    return md5_hash
def files_with_invalid_hashes(hash_file_path, file_paths):
    """Return the paths whose current md5 differs from the stored one.

    A file with no stored hash counts as invalid.
    """
    stored = load_hashes(hash_file_path)
    stale = []
    for path in file_paths:
        name = os.path.basename(path)
        if stored.get(name, "") != calculate_file_hash(path):
            stale.append(path)
    return stale
def update_file_hashes(hash_file_path, file_paths):
    """Recompute the md5 of every file in *file_paths* and persist the map."""
    fresh = {os.path.basename(path): calculate_file_hash(path)
             for path in file_paths}
    save_hashes(hash_file_path, fresh)
| bsd-3-clause |
dropbox/changes | tests/changes/api/serializer/models/test_jobstep.py | 1 | 1090 | from changes.api.serializer import serialize
from changes.config import db
from changes.models.project import ProjectOption
from changes.testutils import TestCase
class JobStepCrumblerTestCase(TestCase):
    # Verifies that serializing a JobStep exposes the snapshot image
    # associated with its job plan.
    def test_associated_snapshot_image(self):
        # Arrange: a project whose plan has a snapshot image, with
        # snapshots enabled and the snapshot marked as current.
        project = self.create_project()
        build = self.create_build(project=project)
        plan = self.create_plan(project)
        job = self.create_job(build=build)
        snapshot = self.create_snapshot(project)
        image = self.create_snapshot_image(plan=plan, snapshot=snapshot)
        self.create_option(item_id=plan.id, name='snapshot.allow', value='1')
        db.session.add(ProjectOption(project_id=project.id,
                                     name='snapshot.current',
                                     value=snapshot.id.hex))
        db.session.commit()
        # The job plan is pinned to the snapshot created above.
        self.create_job_plan(job=job, plan=plan, snapshot_id=snapshot.id)
        phase = self.create_jobphase(job)
        jobstep = self.create_jobstep(phase)
        # Act + assert: the serialized jobstep reports that image's id.
        result = serialize(jobstep)
        assert result['image']['id'] == image.id.hex
| apache-2.0 |
lunixbochs/meta | utils/tvsort/tvsort.py | 1 | 9428 | #!/usr/bin/python
# tv_sort.py
# detects and sorts tv shows automatically based on filename
import os
import shutil
import re
import xmlrpclib
# Lazily-matched show-name capture: word chars, apostrophes, dots, dashes, spaces.
words = r'([\'\w\.\- ]+?)'
# Any run of separator characters used in release names.
spacers = re.compile(r'[\s_\-\.]+')
# "Name S01E02" style.
show_filename = re.compile(words + r'S(\d+)\s?EP?(\d+)', re.IGNORECASE)
# "Name 2014.05.02" date style; the empty third group keeps the same
# (name, season, episode) group shape as the other patterns.
show_filename_2 = re.compile(words + r'(\d{4}\.[012]?[0-9]\.\d{1,2})()', re.IGNORECASE)
# "Name 1x02" style.
show_filename_3 = re.compile(words + r'(\d+)x(\d+)', re.IGNORECASE)
# Bare "Name 102" style: season digits followed by up to two episode digits.
show_filename_4 = re.compile(words + r'(\d+?)(\d{1,2})', re.IGNORECASE)
word_match = re.compile('(%s+)' % words, re.IGNORECASE)
# Trailing article, e.g. "Office, The".
the_match = re.compile(r'(.*?)(, The)$', re.IGNORECASE)
hd_match = re.compile(r'(720p|1080p)')
# A plausible release year (1900-2999), optionally parenthesized.
date = r'(19\d{2}|2\d{3})'
date_match = re.compile(r' ?(\(%s\)|%s) ?' % (date, date))
# File extensions treated as video files worth sorting.
extensions = ('.mp4', '.avi', '.mkv', '.m4v', '.wmv', '.mpg', '.mpeg')
def forward_the(name):
    """Move a trailing article to the front: 'Office, The' -> 'The Office'.

    Names without a trailing ', The' pass through unchanged.
    """
    if not the_match.match(name):
        return name
    return the_match.sub(r'The \1', name)
class Auto(dict):
    """Per-folder settings loaded from a simple ``key: value`` file.

    Each non-empty line containing a colon becomes one entry; keys and
    values are whitespace-stripped and surrounding quotes are removed from
    values. A missing file yields an empty mapping. The source path is
    kept on ``self.path`` for error reporting.
    """

    def __init__(self, path):
        self.path = path
        if not os.path.isfile(path):
            return
        settings_file = open(path, 'r')
        for raw_line in settings_file.readlines():
            raw_line = raw_line.strip()
            if not raw_line or ':' not in raw_line:
                continue
            key, _sep, value = raw_line.partition(':')
            self[key.strip()] = value.strip().strip('\'"')
class Folder:
    # One destination show folder: knows how to recognise filenames that
    # belong to the show and how to build the renamed target path.
    def __init__(self, path):
        self.path = path
        self.name = os.path.split(path)[1]
        self.regex = None
        # Default layout: "Season N/Show S01E02"; overridable via .auto.
        self.naming = 'Season %(season)s/%(name)s %(epstr)s'
        self.auto = Auto(os.path.join(path, '.auto'))
        if 'regex' in self.auto:
            try:
                self.regex = re.compile(self.auto['regex'], re.IGNORECASE)
            except:
                print 'error parsing regex for: %s' % self.auto.path
        if 'name' in self.auto:
            self.naming = self.auto['name']
        if not self.regex:
            # Fall back to matching the folder's own (cleaned) name.
            # TODO: this is terrible for single-letter names
            text = word_match.match(self.name).group()
            text = spacers.sub(' ', text)
            self.regex = re.compile(re.escape(text), re.IGNORECASE)

    def match(self, name):
        # True when this folder's regex appears anywhere in `name`.
        match = self.regex.search(name)
        if match:
            return True
        return False

    def rename(self, name, season, episode, hd=None, ext='.avi'):
        # Build (folder path, relative target filename, episode string).
        # Date-style episodes arrive with a falsy `episode`, in which case
        # `season` already holds the full date string.
        if season and episode:
            season = int(season)
            episode = int(episode)
            epstr = 'S%02dE%02d' % (season, episode)
        else:
            epstr = season
        # Without a .auto file, drop the "Season N/" directory layer.
        if not self.auto:
            self.naming = '%(name)s %(epstr)s'
        target = self.naming % {'name':name, 'season':season, 'episode':episode, 'epstr':epstr}
        if hd:
            # Tag HD releases, e.g. "Show S01E02 (720p)".
            target = '%s (%s)' % (target, hd)
        return self.path, target + ext, epstr

    def __repr__(self):
        return '<Folder "%s">' % self.name
def extract(path):
    # Unrar the first .rar archive found in `path` (via the external
    # `unrar` binary), then return (filename, extension) of the first
    # video file present afterwards, or (None, None) if none appears.
    cwd = os.getcwd()
    os.chdir(path)
    first = os.listdir('.')
    for entry in first:
        if entry.endswith('.rar'):
            print 'Extracting: %s' % entry,
            # NOTE(review): the archive name is interpolated unquoted into a
            # shell command; names with spaces/metacharacters would break.
            os.popen('unrar x -o- %s' % entry)
            break
    second = os.listdir('.')
    for entry in second:
        ext = os.path.splitext(entry)[1]
        if ext in extensions:
            print '=> "%s"' % entry
            os.chdir(cwd)
            return entry, ext
    else:
        # Loop finished without finding a video file.
        print '- Nothing found :('
        os.chdir(cwd)
        return None, None
def move(path, filename, name, season, episode, folders, force_move=False, dry_run=False, link=False, hd=None, ext='.avi'):
    # Place one detected episode into its show folder. Returns True when
    # the show has no matching folder (caller then skips further episodes
    # of that show); returns None otherwise, including when the target
    # already exists under any known video extension.
    copy = True
    for folder in folders:
        if folder.match(name):
            break
    else:
        print 'Skipped "%s" (example: %s)' % (name, filename)
        return True
    target_folder, target, epstr = folder.rename(name, season, episode, hd, ext)
    no_ext = os.path.splitext(os.path.join(target_folder, target))[0]
    test_base = os.path.split(no_ext)[0]
    # Check whether the target already exists under any video extension
    # (case-insensitively), so re-runs don't duplicate files.
    for test in extensions:
        test_path = no_ext + test
        test_filename = os.path.split(test_path)[1]
        if not os.path.exists(test_base):
            if not dry_run:
                os.makedirs(test_base)
            break
        else:
            if test_base == path:
                # Source and destination directories are the same file tree.
                if test_filename == filename:
                    return
            else:
                for existing in os.listdir(test_base):
                    if existing.lower() == test_filename.lower():
                        return
    path = os.path.join(path, filename)
    if not os.path.exists(path):
        return
    if os.path.isdir(path) and not dry_run:
        # A directory download: extract the archive and move the result.
        entry, ext = extract(path)
        if not entry:
            print 'no extracted file found.'
            return
        else:
            print 'success.'
            path = os.path.join(path, entry)
            copy = False
            # Re-render the target name with the extracted file's extension.
            target_folder, target, epstr = folder.rename(name, season, episode, hd, ext)
    target_path, target_filename = os.path.split(os.path.join(target_folder, target))
    print ('%s %s => %s' % (name, epstr, target)),
    target = os.path.join(target_folder, target)
    if dry_run:
        verb = copy and 'copy' or 'move'
        print 'Would %s %s => %s' % (verb, path, target)
    else:
        if copy and not force_move:
            if link:
                try:
                    os.link(path, target)
                    print 'linked to target'
                except IOError:
                    # Hardlink failed (e.g. cross-device); fall back to copy.
                    link = False
            if not link:
                shutil.copyfile(path, target)
                print 'copied to target.'
        else:
            os.rename(path, target)
            print 'moved to target.'
def run(source, targets, force_move=False, dry_run=False, link=False):
    # Main driver: collect show folders under each target, enumerate
    # candidate files from `source` (a directory or an rtorrent XML-RPC
    # endpoint), parse each filename, and dispatch to move().
    print
    print 'If anything is skipped, make sure the show name exists as a folder in one of the targets'
    print 'You can also use .auto files (see the readme) to change the show matching a folder'
    folders = []
    for target in targets:
        if not os.path.isdir(target): continue
        for folder in os.listdir(target):
            path = os.path.join(target, folder)
            if not os.path.isdir(path): continue
            folders.append(Folder(path))
    files = set()
    if source.startswith('rtorrent://'):
        # Ask rtorrent for the base paths of all completed torrents.
        rtorrent_uri = source.replace('rtorrent', 'http')
        x = xmlrpclib.ServerProxy(rtorrent_uri)
        seeding = x.download_list()
        for torrent in seeding:
            if not x.d.complete(torrent): continue
            files.add(x.d.get_base_path(torrent))
    else:
        for filename in os.listdir(source):
            files.add(os.path.join(source, filename))
    skip = set()
    for path in sorted(files):
        path, filename = os.path.split(path)
        # Strip the HD marker before episode parsing so it can't confuse
        # the numeric patterns; remember it separately for the new name.
        cleaned_filename = hd_match.sub('', filename)
        match1 = show_filename.match(cleaned_filename)
        match2 = show_filename_2.match(cleaned_filename)
        match3 = show_filename_3.match(cleaned_filename)
        match4 = show_filename_4.match(cleaned_filename)
        hd = hd_match.search(filename)
        if hd:
            hd = hd.group()
        # Patterns are ordered most- to least-specific; take the first hit.
        matches = [m for m in (match1, match2, match3, match4) if m]
        if matches:
            match = matches[0]
            name, season, episode = match.groups()
            # replace all spacer chars with spaces
            name = spacers.sub(' ', name)
            # strip the year from the name
            name = date_match.sub(' ', name)
            name = name.strip()
            first = name[0]
            if first == first.lower():
                name = ' '.join(word.capitalize() for word in name.split(' '))
            if name in skip: continue
            if name.endswith(' S'):
                # work around a bug when encountering full season downloads
                continue
            skip_name = move(path, filename, name, season, episode, folders, force_move, dry_run, link, hd)
            if skip_name:
                skip.add(name)
def usage():
    # Print help text and exit with a non-zero status.
    # NOTE(review): relies on `sys`, which is only imported inside the
    # __main__ guard below; calling usage() from an importing module would
    # raise NameError. Confirm before reusing outside script execution.
    print 'Usage: ./tv_sort.py [flags] <source> <target folder> [target folder] [target folder]...'
    print 'Sorts TV shows into folders.'
    print '  Flags:'
    print '    -m, --move: force moving of normal files (extracted files are always moved)'
    print '    -l, --link: hardlink files instead of copying'
    print '    -d, --dry:  dry run - only display what would be moved/copied'
    print '  Source options:'
    print '    * /path/to/folder'
    print '    * rtorrent://localhost/scgi_path'
    sys.exit(1)
if __name__ == '__main__':
    import sys
    # Hand-rolled flag parsing: long/short flags may appear anywhere;
    # everything else is a positional argument (source, then targets).
    args = []
    force_move = False
    dry_run = False
    link = False
    for arg in sys.argv[1:]:
        sort = None
        if arg.startswith('-'):
            # Sorted copy of the flag chars, used to accept combined
            # short flags (e.g. -md) in any order.
            sort = ''.join(sorted(arg))
        if arg in ('-m', '--move'):
            force_move = True
        elif arg in ('-d', '--dry'):
            dry_run = True
        elif arg in ('-l', '--link'):
            link = True
        elif sort:
            # NOTE(review): the pattern accepts 'v' but the body checks for
            # 'l' -- combined "-l" (e.g. -ml) is never honoured here while
            # "-v" is silently accepted; looks like a latent typo. Confirm.
            if re.match(r'-[mdv]{1,3}', sort):
                if 'm' in sort: force_move = True
                if 'd' in sort: dry_run = True
                if 'l' in sort: link = True
        else:
            args.append(arg)
    if len(args) < 2:
        usage()
    else:
        run(args[0], args[1:], force_move=force_move, dry_run=dry_run, link=link)
| mit |
TeamOrion-Devices/kernel_htc_msm8974 | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
# All per-target build trees and logs live under this directory.
build_dir = '../all-kernels'
# Default make goals; overridden by --oldconfig / --make-target.
make_command = ["vmlinux", "modules", "dtbs"]
# Kernel builds run with ARCH=arm and reproducible config timestamps;
# CROSS_COMPILE is only defaulted, so the caller's environment wins.
make_env = os.environ
make_env.update({
        'ARCH': 'arm',
        'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
# Populated from parsed command-line options in main().
all_options = {}
def error(msg):
    """Report *msg* on stderr without terminating the program."""
    sys.stderr.write("error: {0}\n".format(msg))
def fail(msg):
    """Report *msg* on stderr and terminate with exit status 1."""
    error(msg)
    raise SystemExit(1)
def check_kernel():
    """Abort unless the current directory looks like an MSM kernel tree."""
    required = ('MAINTAINERS', 'arch/arm/mach-msm/Kconfig')
    if not all(os.path.isfile(marker) for marker in required):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed."""
    # Fix: `errno` was referenced below but never imported anywhere in this
    # file, so the EEXIST race handler raised NameError instead of working.
    import errno

    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # Another process may create the directory between the isdir()
            # check and makedirs(); that race is not an error.
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
def update_config(file, str):
    # Append one config option line (e.g. "CONFIG_FOO=y") to a defconfig.
    print 'Updating %s with \'%s\'\n' % (file, str)
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()
def scan_configs():
    """Return {target name: defconfig path} for every MSM-family defconfig."""
    patterns = (
        r'[fm]sm[0-9]*_defconfig',
        r'apq*_defconfig',
        r'qsd*_defconfig',
        r'msmkrypton*_defconfig',
    )
    found = {}
    for pattern in patterns:
        for config_path in glob.glob('arch/arm/configs/' + pattern):
            # Strip the trailing '_defconfig' (10 chars) from the basename.
            target = os.path.basename(config_path)[:-10]
            found[target] = config_path
    return found
class Builder:
    # Runs one make invocation, teeing its combined stdout/stderr into a
    # log file while showing either the raw output (verbose) or a dot
    # progress indicator on the console.
    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        # Execute `args`; returns the process exit status.
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read from the raw fd in chunks instead of line-iterating, so
        # output appears promptly rather than after buffering.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One dot per output line, wrapped every 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
# Targets that failed when --keep-going is in effect; reported at the end.
failed_targets = []

def build(target):
    # Configure and build one defconfig target in its own directory
    # under build_dir, logging all output to log-<target>.log.
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    staging_dir = 'install_staging'
    modi_dir = '%s' % staging_dir
    hdri_dir = '%s/usr' % staging_dir
    # Start from a clean staging area for headers/modules install.
    shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()
    if not all_options.updateconfigs:
        # Build targets can be dependent upon the completion of previous
        # build targets, so build them one at a time.
        cmd_line = ['make',
            'INSTALL_HDR_PATH=%s' % hdri_dir,
            'INSTALL_MOD_PATH=%s' % modi_dir,
            'O=%s' % dest_dir]
        build_targets = []
        # Separate make flags (-j, --foo) from actual goals.
        for c in make_command:
            if re.match(r'^-{1,2}\w', c):
                cmd_line.append(c)
            else:
                build_targets.append(c)
        for t in build_targets:
            build = Builder(log_name)
            result = build.run(cmd_line + [t])
            if result != 0:
                if all_options.keep_going:
                    failed_targets.append(target)
                    fail_or_error = error
                else:
                    fail_or_error = fail
                fail_or_error("Failed to build %s, see %s" %
                              (target, build.logname))
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    # Build each requested target in turn; with --updateconfigs, first
    # append the requested option to that target's defconfig. Aborts at
    # the end if any target failed under --keep-going.
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    if failed_targets:
        fail('\n  '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    # Parse options, validate the tree, and build the selected targets.
    global make_command

    check_kernel()
    check_build()
    configs = scan_configs()

    usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
                 ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Options are consulted globally (e.g. by Builder and build()).
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print "    %s" % target
        sys.exit(0)

    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target

    # -j / -l are forwarded to make as flags.
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")

if __name__ == "__main__":
    main()
| gpl-2.0 |
ChameleonCloud/horizon | openstack_auth/plugin/base.py | 1 | 9995 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
import re
from django.utils.translation import ugettext_lazy as _
from keystoneauth1 import exceptions as keystone_exceptions
from keystoneclient.v3 import client as v3_client
import six
from openstack_auth import exceptions
from openstack_auth import utils
LOG = logging.getLogger(__name__)
__all__ = ['BasePlugin']
@six.add_metaclass(abc.ABCMeta)
class BasePlugin(object):
    """Base plugin to provide ways to log in to dashboard.

    Provides a framework for keystoneclient plugins that can be used with the
    information provided to return an unscoped token.
    """

    @abc.abstractmethod
    def get_plugin(self, auth_url=None, **kwargs):
        """Create a new plugin to attempt to authenticate.

        Given the information provided by the login providers attempt to create
        an authentication plugin that can be used to authenticate the user.

        If the provided login information does not contain enough information
        for this plugin to proceed then it should return None.

        :param str auth_url: The URL to authenticate against.

        :returns: A plugin that will be used to authenticate or None if the
                  plugin cannot authenticate with the data provided.
        :rtype: keystoneclient.auth.BaseAuthPlugin
        """
        return None

    @property
    def keystone_version(self):
        """The Identity API version as specified in the settings file."""
        return utils.get_keystone_version()

    def list_projects(self, session, auth_plugin, auth_ref=None):
        """List the projects that are accessible to this plugin.

        Query the keystone server for all projects that this authentication
        token can be rescoped to.

        This function is overrideable by plugins if they use a non-standard
        mechanism to determine projects.

        :param session: A session object for communication:
        :type session: keystoneclient.session.Session
        :param auth_plugin: The auth plugin returned by :py:meth:`get_plugin`.
        :type auth_plugin: keystoneclient.auth.BaseAuthPlugin
        :param auth_ref: The current authentication data. This is optional as
                         future auth plugins may not have auth_ref data and all
                         the required information should be available via the
                         auth_plugin.
        :type auth_ref: keystoneclient.access.AccessInfo` or None.

        :raises: exceptions.KeystoneAuthException on lookup failure.

        :returns: A list of projects. This currently accepts returning both v2
                  or v3 keystoneclient projects objects.
        """
        try:
            client = v3_client.Client(session=session, auth=auth_plugin)
            # Federated tokens list projects through the federation API.
            if auth_ref.is_federated:
                return client.federation.projects.list()
            else:
                return client.projects.list(user=auth_ref.user_id)
        except (keystone_exceptions.ClientException,
                keystone_exceptions.AuthorizationFailure):
            msg = _('Unable to retrieve authorized projects.')
            raise exceptions.KeystoneRetrieveProjectsException(msg)

    def list_domains(self, session, auth_plugin, auth_ref=None):
        # List the domains this token can be rescoped to; mirrors
        # list_projects but via the auth/domains endpoint.
        try:
            client = v3_client.Client(session=session, auth=auth_plugin)
            return client.auth.domains()
        except (keystone_exceptions.ClientException,
                keystone_exceptions.AuthorizationFailure):
            msg = _('Unable to retrieve authorized domains.')
            raise exceptions.KeystoneRetrieveDomainsException(msg)

    def get_access_info(self, keystone_auth):
        """Get the access info from an unscoped auth

        This function provides the base functionality that the
        plugins will use to authenticate and get the access info object.

        :param keystone_auth: keystoneauth1 identity plugin
        :raises: exceptions.KeystoneAuthException on auth failure
        :returns: keystoneclient.access.AccessInfo
        """
        session = utils.get_session()

        try:
            unscoped_auth_ref = keystone_auth.get_access(session)
        except keystone_exceptions.ConnectFailure as exc:
            LOG.error(str(exc))
            msg = _('Unable to establish connection to keystone endpoint.')
            raise exceptions.KeystoneConnectionException(msg)
        except (keystone_exceptions.Unauthorized,
                keystone_exceptions.Forbidden,
                keystone_exceptions.NotFound) as exc:
            msg = str(exc)
            LOG.debug(msg)
            # Keystone reports password expiry only via the message text;
            # detect it so the UI can redirect to the password-change flow.
            match = re.match(r"The password is expired and needs to be changed"
                             r" for user: ([^.]*)[.].*", msg)
            if match:
                exc = exceptions.KeystonePassExpiredException(
                    _('Password expired.'))
                exc.user_id = match.group(1)
                raise exc
            msg = _('Invalid credentials.')
            raise exceptions.KeystoneCredentialsException(msg)
        except (keystone_exceptions.ClientException,
                keystone_exceptions.AuthorizationFailure) as exc:
            msg = _("An error occurred authenticating. "
                    "Please try again later.")
            LOG.debug(str(exc))
            raise exceptions.KeystoneAuthException(msg)
        return unscoped_auth_ref

    def get_project_scoped_auth(self, unscoped_auth, unscoped_auth_ref,
                                recent_project=None):
        """Get the project scoped keystone auth and access info

        This function returns a project scoped keystone token plugin
        and AccessInfo object.

        :param unscoped_auth: keystone auth plugin
        :param unscoped_auth_ref: keystoneclient.access.AccessInfo` or None.
        :param recent_project: project that we should try to scope to
        :return: keystone token auth plugin, AccessInfo object
        """
        auth_url = unscoped_auth.auth_url
        session = utils.get_session()

        projects = self.list_projects(
            session, unscoped_auth, unscoped_auth_ref)
        # Attempt to scope only to enabled projects
        projects = [project for project in projects if project.enabled]

        # if a most recent project was found, try using it first
        if recent_project:
            for pos, project in enumerate(projects):
                if project.id == recent_project:
                    # move recent project to the beginning
                    projects.pop(pos)
                    projects.insert(0, project)
                    break

        # Try each project in order until one accepts the rescope; the
        # first success wins.
        scoped_auth = None
        scoped_auth_ref = None
        for project in projects:
            token = unscoped_auth_ref.auth_token
            scoped_auth = utils.get_token_auth_plugin(auth_url,
                                                      token=token,
                                                      project_id=project.id)
            try:
                scoped_auth_ref = scoped_auth.get_access(session)
            except (keystone_exceptions.ClientException,
                    keystone_exceptions.AuthorizationFailure):
                LOG.info('Attempted scope to project %s failed, will attempt '
                         'to scope to another project.', project.name)
            else:
                break

        return scoped_auth, scoped_auth_ref

    def get_domain_scoped_auth(self, unscoped_auth, unscoped_auth_ref,
                               domain_name=None):
        """Get the domain scoped keystone auth and access info

        This function returns a domain scoped keystone token plugin
        and AccessInfo object.

        :param unscoped_auth: keystone auth plugin
        :param unscoped_auth_ref: keystoneclient.access.AccessInfo` or None.
        :param domain_name: domain that we should try to scope to
        :return: keystone token auth plugin, AccessInfo object
        """
        session = utils.get_session()
        auth_url = unscoped_auth.auth_url

        if domain_name:
            domains = [domain_name]
        else:
            domains = self.list_domains(session,
                                        unscoped_auth,
                                        unscoped_auth_ref)
            domains = [domain.name for domain in domains if domain.enabled]

        # domain support can require domain scoped tokens to perform
        # identity operations depending on the policy files being used
        # for keystone.
        domain_auth = None
        domain_auth_ref = None
        for _name in domains:
            token = unscoped_auth_ref.auth_token
            domain_auth = utils.get_token_auth_plugin(
                auth_url,
                token,
                domain_name=_name)
            try:
                domain_auth_ref = domain_auth.get_access(session)
            except (keystone_exceptions.ClientException,
                    keystone_exceptions.AuthorizationFailure):
                LOG.info('Attempted scope to domain %s failed, will attempt '
                         'to scope to another domain.', _name)
            else:
                if len(domains) > 1:
                    LOG.info("More than one valid domain found for user %s,"
                             " scoping to %s",
                             unscoped_auth_ref.user_id, _name)
                break
        return domain_auth, domain_auth_ref
| apache-2.0 |
jose36/jmdl4 | playtvfr.py | 297 | 8750 | import struct
import urllib2,urllib
import re
import json
import math
# Golden-ratio derived round constant used by the XXTEA cipher below.
CRYPT_XXTEA_DELTA= 0x9E3779B9
# Extra HTTP headers sent with player/API requests.
headers = [('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),( 'Connection','Keep-Alive')]
class Crypt_XXTEA:
    # XXTEA block cipher over 32-bit words (little-endian), mirroring the
    # PHP Crypt_XXTEA API: setKey() then encrypt()/decrypt() on strings or
    # lists of 32-bit ints. Key/plaintext validation only prints a message
    # (it does not raise), matching the original behaviour.

    # 128-bit key as four 32-bit words; set via setKey().
    _key=None

    def setKey(self,key):
        # Accept a string (packed to words) or a list of ints; pad short
        # keys with zero words up to four entries.
        if isinstance(key, basestring):
            k = self._str2long(key, False);
        elif isinstance(key, list):
            k = key;
        else:
            print "The secret key must be a string or long integer array"
        if (len(k) > 4):
            print "The secret key cannot be more than 16 characters or 4 long values"
        elif (len(k) == 0):
            print "The secret key cannot be empty"
        elif (len(k) < 4):
            for i in range(len(k),4):
                k.append(0)
                #k[i] = 0;
        #print k
        self._key = k;

    def encrypt(self,plaintext):
        # Encrypt a string or a list of 32-bit words with the current key.
        if (self._key == None):
            print "Secret key is undefined"
        if isinstance(plaintext, basestring):
            return self._encryptString(plaintext)
        elif isinstance(plaintext, list):
            return self._encryptArray(plaintext)
        else:
            print "The plain text must be a string or long integer array"

    def decrypt(self,ciphertext):
        # Decrypt a string or a list of 32-bit words with the current key.
        if (self._key == None):
            print "Secret key is undefined"
        #print 'dec',isinstance(ciphertext, basestring)
        if isinstance(ciphertext, basestring):
            return self._decryptString(ciphertext)
        elif isinstance(ciphertext, list):
            return self._decryptArray(ciphertext)
        else:
            print "The plain text must be a string or long integer array"

    def _encryptString(self,str):
        # Pack string -> words, encrypt, repack to a byte string.
        if (str == ''):
            return ''
        v = self._str2long(str, False);
        v = self._encryptArray(v);
        return self._long2str(v, False);

    def _encryptArray(self,v):
        # Core XXTEA encryption rounds over the word array (in place).
        n = len(v) - 1;
        z = v[n];
        y = v[0];
        q = math.floor(6 + 52 / (n + 1));
        sum = 0;
        while (0 < q):
            q-=1
            sum = self._int32(sum + CRYPT_XXTEA_DELTA);
            e = sum >> 2 & 3;
            for p in range(0,n):
                y = v[p + 1];
                mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
                z = v[p] = self._int32(v[p] + mx);
            p+=1#due to range
            y = v[0];
            mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
            z = v[n] = self._int32(v[n] + mx);
        return v;

    def _decryptString(self,str):
        # Pack string -> words, decrypt, repack to a byte string.
        if (str == ''):
            return '';
        v = self._str2long(str, False);
        v = self._decryptArray(v);
        return self._long2str(v, False);

    def _decryptArray(self,v):
        # Core XXTEA decryption rounds (inverse of _encryptArray).
        n = len(v) - 1;
        z = v[n];
        y = v[0];
        q = math.floor(6 + 52 / (n + 1));
        sum = self._int32(q * CRYPT_XXTEA_DELTA);
        while (sum != 0):
            e = sum >> 2 & 3;
            for p in range( n, 0, -1):
                z = v[p - 1];
                mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
                y = v[p] = self._int32(v[p] - mx);
            p=p-1 #due to range
            z = v[n];
            mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
            y = v[0] = self._int32(v[0] - mx);
            sum = self._int32(sum - CRYPT_XXTEA_DELTA);
        return v;

    def _long2str(self,v, w):
        # Pack a list of 32-bit words into a little-endian byte string.
        # NOTE(review): the w=True branch calls `substr`, which is not
        # defined in this file (PHP leftover); only w=False is used here.
        ln = len(v);
        s = '';
        for i in range(0,ln):
            s += struct.pack('<I', v[i]&0xFFFFFFFF);
        if (w):
            return substr(s, 0, v[ln - 1]);
        else:
            return s;

    def _str2long(self,s, w):
        # Unpack a byte string (NUL-padded to a word boundary) into a list
        # of native-endian 32-bit words.
        #return (s + ("\0" *( (4 - len(s) % 4) & 3))).encode("hex")
        i=int(math.ceil((len(s)/4)))
        if (len(s)%4)>0 :
            i+=1
        #print struct.unpack('<I',(s + ("\0" *( (4 - len(s) % 4) & 3))))
        v = list(struct.unpack(('I'*i),(s + ("\0" *( (4 - len(s) % 4) & 3)))))
        if (w):
            v[0] = len(s); #prb
        return v;

    def _int32(self,n):
        # Clamp an arbitrary Python int into signed 32-bit range.
        while (n >= 2147483648):
            n -= 4294967296;
        while (n <= -2147483649):
            n += 4294967296;
        return int(n);
def getUrl(url, cookieJar=None, post=None, timeout=20, headers=None):
    """Fetch ``url`` and return the response body.

    Args:
        url: target URL.
        cookieJar: optional cookie jar shared across requests.
        post: optional POST payload; None means GET.
        timeout: socket timeout in seconds.
        headers: optional iterable of (name, value) pairs added to the
            request on top of the default User-Agent.

    Returns:
        The raw response body.
    """
    cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for name, value in headers:
            req.add_header(name, value)
    response = opener.open(req, post, timeout=timeout)
    try:
        return response.read()
    finally:
        # Always release the connection, even if read() raises
        # (the original leaked the response on a failed read).
        response.close()
def HexToByte(hexStr):
    """
    Convert a string of hex byte values into a byte string.  The hex
    byte values may or may not be space separated.

    >>> HexToByte('41 42')
    'AB'
    """
    # Drop optional spaces, then decode one byte per hex pair.  The
    # original loop accumulated into a local named ``bytes``, shadowing
    # the builtin of the same name.
    compact = ''.join(hexStr.split(" "))
    return ''.join(chr(int(compact[i:i + 2], 16)) for i in range(0, len(compact), 2))
def get_url(player_id):
    """Resolve the final stream URL for a playtv.fr channel.

    Fetches the obfuscated player config for ``player_id``, decrypts it
    with XXTEA, negotiates an authenticated session with the API and
    builds either an HTTP or an RTMP URL.

    NOTE(review): Python 2 only -- relies on str.decode("hex") and on
    the module-global ``headers`` list and Crypt_XXTEA class.
    """
    v = Crypt_XXTEA()
    import time
    # Retrieve channel id and primary key.
    timestamp = time.time()
    init = getUrl("http://tvplayer.playtv.fr/js/" + player_id + ".js?_=" + str(timestamp), headers=headers)
    pat = "b:(\{\"a.*\"})}"
    init = re.compile(pat).findall(init)[0]
    init = json.loads(init)
    a = init['a'].decode("hex")
    b = init['b'].decode("hex")
    # The 'b' blob is encrypted with the static key "object"; it yields
    # the key that decrypts the 'a' blob.
    v.setKey("object")
    b_s = v.decrypt(b).rstrip('\0')
    params = json.loads(b_s)
    pack_k = params['k'].decode("hex")
    key = v.decrypt(pack_k).rstrip('\0')
    v.setKey(key)
    a_d = v.decrypt(a).rstrip('\0')
    params = json.loads(a_d)
    channel_id = params['i']
    api_url = params['u']
    req = {"i": channel_id, "t": timestamp, "h": "playtv.fr", "a": 5}
    req = json.dumps(req)
    req_en = v.encrypt(req).encode("hex")
    if not req_en.endswith('/'):
        req_en += '/'
    # BUGFIX: list.append() returns None, so the original
    # ``headers2 = headers.append([...])`` passed headers2=None and the
    # Referer / x-flash-version headers were never sent.  Build a new
    # list instead (also avoids mutating the module-global ``headers``).
    headers2 = headers + [('Referer', 'http://static.playtv.fr/swf/tvplayer.swf?r=22'),
                          ('x-flash-version', '11,6,602,180')]
    init = getUrl(api_url + req_en, headers=headers2)
    init = init.decode("hex")
    params = json.loads(v.decrypt(init).rstrip('\0'))
    # Prefer the higher-bitrate stream when a second one is offered.
    # (``<>`` replaced with ``!=``; the former is invalid in Python 3.)
    if params['s'][1] and params['s'][1] != '':
        streams = params['s'][0] if params['s'][0]['bitrate'] > params['s'][1]['bitrate'] else params['s'][1]
    else:
        streams = params['s'][0]
    scheme = streams['scheme']
    host = streams['host']
    port = streams['port']
    app = streams['application']
    playpath = streams['stream']
    token = streams['token']
    # Second XXTEA round-trip produces the RTMP auth token.
    t = params['j']['t']
    k = params['j']['k']
    v.setKey("object")
    key = v.decrypt(k.decode("hex"))
    v.setKey(key)
    auth = v.encrypt(t).encode("hex")
    if scheme == "http":
        final_url = scheme + "://" + host + (":" + port if port and len(port) > 0 else "") + "/" + playpath
    else:
        final_url = (scheme + "://" + host + (":" + port if port and len(port) > 0 else "") + "/" + app
                     + " app=" + app
                     + " swfUrl=http://static.playtv.fr/swf/tvplayer.swf pageUrl=http://playtv.fr/television Conn=S:" + auth
                     + (" token=" + token if token and len(token) > 0 else "")
                     + " playpath=" + playpath + ' live=1 timeout=20')
    print(final_url)
    return final_url
#print get_url('69T7MabZ47')
| gpl-2.0 |
nburn42/tensorflow | tensorflow/contrib/autograph/operators/control_flow.py | 3 | 7765 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.utils import builtins
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
def for_stmt(iter_, extra_test, body, init_state):
  """Functional form of a for statement.

  The loop carries a state tuple holding every symbol that varies across
  iterations, excluding the iterate itself and variables local to the
  loop body.

  For example, this loop computing geometric and arithmetic means:

      geo_mean = 1
      arith_mean = 0
      for i in range(n):
        a = numbers[i]
        geo_mean *= a
        arith_mean += a

  has state (geo_mean, arith_mean): init_state would hold (1, 0), and
  body would accept both values as arguments and return a tuple with
  their updated values.

  Args:
    iter_: The entity being iterated over.
    extra_test: Callable with the state as arguments, and boolean return
      type. An additional loop condition.
    body: Callable with the iterate and the state as arguments, and state
      as return type. The actual loop body.
    init_state: Tuple containing the initial state.

  Returns:
    Tuple containing the final state.
  """
  # Dispatch on the iterated entity: tensors and datasets are staged
  # into TF graph ops; everything else runs as ordinary Python.
  if tensor_util.is_tensor(iter_):
    return _known_len_for_stmt(iter_, extra_test, body, init_state)
  if isinstance(iter_, dataset_ops.Dataset):
    return _dataset_for_stmt(iter_, extra_test, body, init_state)
  return _py_for_stmt(iter_, extra_test, body, init_state)
def _py_for_stmt(iter_, extra_test, body, init_state):
"""Overload of for_stmt that executes a Python for loop."""
state = init_state
for target in iter_:
if not extra_test(*state):
break
state = body(target, *state)
# TODO(mdan): Remove this special case.
if len(state) == 1:
return state[0]
return state
def _known_len_for_stmt(iter_, extra_test, body, init_state):
  """Overload of for_stmt that iterates over objects that define a length."""
  n = builtins.dynamic_len(iter_)

  def while_body(iterate_index, *state):
    # Read the current element, run the user body, and thread the
    # incremented index through as the first state element.
    iterate = iter_[iterate_index]
    new_state = body(iterate, *state)
    return (iterate_index + 1,) + new_state

  def while_cond(iterate_index, *state):
    # Stop at the end of the sequence, or earlier if extra_test vetoes.
    return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))

  results = while_stmt(
      while_cond,
      while_body,
      init_state=(0,) + init_state,
      extra_deps=(iter_,),
      opts=dict(maximum_iterations=n))
  # Dropping the iteration index because it's not syntactically visible.
  results = results[1:]
  # TODO(mdan): Remove this special case.
  if len(results) == 1:
    return results[0]
  return results
def _dataset_for_stmt(ds, extra_test, body, init_state):
  """Overload of for_stmt that iterates over TF Datasets."""
  # Because Datasets only expose get_next, in the style of Python iterators,
  # we are forced to unpack the loop as:
  #
  #   epoch_number, iterate = ds.get_next()
  #   while epoch_number < 2:
  #     <body>
  #     epoch_number, iterate = ds.get_next()
  epoch_numbers = dataset_ops.Dataset.range(2)

  def tag_with(ds, tag):
    # Pair every element of ds with the constant ``tag``.
    return dataset_ops.Dataset.zip(
        (dataset_ops.Dataset.from_tensors(tag).repeat(), ds))

  ds_with_epoch = epoch_numbers.flat_map(lambda i: tag_with(ds, i))

  iterator = ds_with_epoch.make_initializable_iterator()
  with ops.control_dependencies((iterator.initializer,)):
    epoch_number, iterate = iterator.get_next()

  def while_body(epoch_number, iterate, *state):
    new_state = body(iterate, *state)
    epoch_number, iterate = iterator.get_next()
    return (epoch_number, iterate) + new_state

  def while_cond(epoch_number, iterate, *state):
    del iterate
    # The loop ends once elements tagged with epoch 1 start arriving,
    # i.e. after exactly one full pass over ds.
    return gen_math_ops.logical_and(epoch_number < 1, extra_test(*state))

  results = while_stmt(
      while_cond,
      while_body,
      init_state=(epoch_number, iterate) + init_state,
      extra_deps=())
  # Dropping the epoch number and iterate because they are not syntactically
  # visible.
  results = results[2:]
  # TODO(mdan): Remove this special case.
  if len(results) == 1:
    return results[0]
  return results
def while_stmt(test, body, init_state, extra_deps, opts=None):
  """Functional form of a while statement.

  The loop carries a so-called state tuple holding every symbol that is
  variant across iterations. In what follows, "state" refers either to
  that tuple of entities or to an argument list of the corresponding
  types.

  Args:
    test: Callable with the state as arguments, and boolean return type.
      The loop condition.
    body: Callable with the state as arguments, and state as return type.
      The actual loop body.
    init_state: Tuple containing the initial state.
    extra_deps: Tuple containing additional entities on which the loop may
      depend, such as loop invariants referenced by test. Used
      exclusively for dispatch control.
    opts: Optional dict of extra loop parameters.

  Returns:
    Tuple containing the final state.
  """
  # TODO(mdan): Consider adding a generic mechanism for dynamic dispatch.
  # That could be something as simple as a collection of dispatch rules, with
  # some prioritization.
  stages_tf_loop = any(
      tensor_util.is_tensor(v) for v in init_state + extra_deps)
  if stages_tf_loop:
    return _tf_while_stmt(test, body, init_state, opts)
  return _py_while_stmt(test, body, init_state, opts)
def _tf_while_stmt(test, body, init_state, opts):
  """Overload of while_stmt that stages a TF while loop."""
  # opts may be None; while_loop receives its entries as keyword args.
  return control_flow_ops.while_loop(test, body, init_state, **(opts or {}))
def _py_while_stmt(test, body, init_state, opts):
"""Overload of while_stmt that executes a Python while loop."""
del opts
state = init_state
while test(*state):
state = body(*state)
return state
def if_stmt(cond, body, orelse):
  """Functional form of an if statement.

  Args:
    cond: Boolean.
    body: Callable with no arguments, and outputs of the positive (if)
      branch as return type.
    orelse: Callable with no arguments, and outputs of the negative
      (else) branch as return type.

  Returns:
    Tuple containing the statement outputs.
  """
  # A tensor condition must be staged as a TF cond; a plain Python
  # boolean is evaluated immediately.
  dispatch = _tf_if_stmt if tensor_util.is_tensor(cond) else _py_if_stmt
  return dispatch(cond, body, orelse)
def _tf_if_stmt(cond, body, orelse):
  """Overload of if_stmt that stages a TF cond."""
  return control_flow_ops.cond(cond, true_fn=body, false_fn=orelse)
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
| apache-2.0 |
szd1007/thft-learn | test/py.twisted/test_suite.py | 18 | 5655 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import os
import glob
import time
basepath = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(basepath, 'gen-py.twisted'))
sys.path.insert(0, glob.glob(os.path.join(basepath, '../../lib/py/build/lib.*'))[0])
from ThriftTest import ThriftTest
from ThriftTest.ttypes import Xception, Xtruct
from thrift.transport import TTwisted
from thrift.protocol import TBinaryProtocol
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.internet.protocol import ClientCreator
from zope.interface import implements
class TestHandler:
    """Server-side implementation of the ThriftTest service.

    Most methods simply echo their argument back to the caller.
    testException raises on demand, and testOneway records firing times
    on a DeferredQueue so the test case can assert on one-way timing.
    """
    implements(ThriftTest.Iface)

    def __init__(self):
        self.onewaysQueue = defer.DeferredQueue()

    def testVoid(self):
        pass

    def testString(self, s):
        return s

    def testByte(self, b):
        return b

    def testI16(self, i16):
        return i16

    def testI32(self, i32):
        return i32

    def testI64(self, i64):
        return i64

    def testDouble(self, dub):
        return dub

    def testBinary(self, thing):
        return thing

    def testStruct(self, thing):
        return thing

    def testException(self, s):
        """Raise Xception for 'Xception', ValueError for 'throw_undeclared'."""
        if s == 'Xception':
            err = Xception()
            err.errorCode = 1001
            err.message = s
            raise err
        if s == "throw_undeclared":
            raise ValueError("foo")

    def testOneway(self, seconds):
        """Schedule a queue entry ``seconds`` from now; return immediately."""
        scheduled_at = time.time()

        def record(started):
            self.onewaysQueue.put((started, time.time(), seconds))

        reactor.callLater(seconds, record, scheduled_at)

    def testNest(self, thing):
        return thing

    def testMap(self, thing):
        return thing

    def testSet(self, thing):
        return thing

    def testList(self, thing):
        return thing

    def testEnum(self, thing):
        return thing

    def testTypedef(self, thing):
        return thing
class ThriftTestCase(unittest.TestCase):
    # End-to-end tests: setUp starts a real Thrift server on an
    # ephemeral loopback port and connects a Twisted client to it; each
    # test drives an RPC through the full protocol stack.

    @defer.inlineCallbacks
    def setUp(self):
        self.handler = TestHandler()
        self.processor = ThriftTest.Processor(self.handler)
        self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
        # Port 0 lets the OS pick a free port; it is read back below.
        self.server = reactor.listenTCP(
            0, TTwisted.ThriftServerFactory(self.processor, self.pfactory), interface="127.0.0.1")
        self.portNo = self.server.getHost().port
        self.txclient = yield ClientCreator(reactor,
                                            TTwisted.ThriftClientProtocol,
                                            ThriftTest.Client,
                                            self.pfactory).connectTCP("127.0.0.1", self.portNo)
        self.client = self.txclient.client

    @defer.inlineCallbacks
    def tearDown(self):
        # Stop the listener first, then drop the client connection.
        yield self.server.stopListening()
        self.txclient.transport.loseConnection()

    @defer.inlineCallbacks
    def testVoid(self):
        self.assertEquals((yield self.client.testVoid()), None)

    @defer.inlineCallbacks
    def testString(self):
        self.assertEquals((yield self.client.testString('Python')), 'Python')

    @defer.inlineCallbacks
    def testByte(self):
        self.assertEquals((yield self.client.testByte(63)), 63)

    @defer.inlineCallbacks
    def testI32(self):
        self.assertEquals((yield self.client.testI32(-1)), -1)
        self.assertEquals((yield self.client.testI32(0)), 0)

    @defer.inlineCallbacks
    def testI64(self):
        self.assertEquals((yield self.client.testI64(-34359738368)), -34359738368)

    @defer.inlineCallbacks
    def testDouble(self):
        self.assertEquals((yield self.client.testDouble(-5.235098235)), -5.235098235)

    # TODO: def testBinary(self) ...

    @defer.inlineCallbacks
    def testStruct(self):
        # Round-trip a struct and check every field survives serialization.
        x = Xtruct()
        x.string_thing = "Zero"
        x.byte_thing = 1
        x.i32_thing = -3
        x.i64_thing = -5
        y = yield self.client.testStruct(x)
        self.assertEquals(y.string_thing, "Zero")
        self.assertEquals(y.byte_thing, 1)
        self.assertEquals(y.i32_thing, -3)
        self.assertEquals(y.i64_thing, -5)

    @defer.inlineCallbacks
    def testException(self):
        # 'Safe' must not raise; 'Xception' raises a declared exception
        # with its fields intact; an undeclared server-side error must
        # still surface as some exception on the client.
        yield self.client.testException('Safe')
        try:
            yield self.client.testException('Xception')
            self.fail("should have gotten exception")
        except Xception as x:
            self.assertEquals(x.errorCode, 1001)
            self.assertEquals(x.message, 'Xception')
        try:
            yield self.client.testException("throw_undeclared")
            self.fail("should have thrown exception")
        except Exception:  # type is undefined
            pass

    @defer.inlineCallbacks
    def testOneway(self):
        # The handler enqueues (scheduled_time, fire_time, delay); the
        # observed gap should match the requested delay.
        yield self.client.testOneway(1)
        start, end, seconds = yield self.handler.onewaysQueue.get()
        self.assertAlmostEquals(seconds, (end - start), places=1)
| apache-2.0 |
2014cdbg3/2015cdbg9 | static/Brython3.1.1-20150328-091302/Lib/errno.py | 624 | 4096 | """
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
"""
# Standard errno symbols with their conventional (Linux) integer values.
EPERM = 1
ENOENT = 2
ESRCH = 3
EINTR = 4
EIO = 5
ENXIO = 6
E2BIG = 7
ENOEXEC = 8
EBADF = 9
ECHILD = 10
EAGAIN = 11
ENOMEM = 12
EACCES = 13
EFAULT = 14
ENOTBLK = 15
EBUSY = 16
EEXIST = 17
EXDEV = 18
ENODEV = 19
ENOTDIR = 20
EISDIR = 21
EINVAL = 22
ENFILE = 23
EMFILE = 24
ENOTTY = 25
ETXTBSY = 26
EFBIG = 27
ENOSPC = 28
ESPIPE = 29
EROFS = 30
EMLINK = 31
EPIPE = 32
EDOM = 33
ERANGE = 34
EDEADLOCK = 35
ENAMETOOLONG = 36
ENOLCK = 37
ENOSYS = 38
ENOTEMPTY = 39
ELOOP = 40
ENOMSG = 42
EIDRM = 43
ECHRNG = 44
EL2NSYNC = 45
EL3HLT = 46
EL3RST = 47
ELNRNG = 48
EUNATCH = 49
ENOCSI = 50
EL2HLT = 51
EBADE = 52
EBADR = 53
EXFULL = 54
ENOANO = 55
EBADRQC = 56
EBADSLT = 57
EBFONT = 59
ENOSTR = 60
ENODATA = 61
ETIME = 62
ENOSR = 63
ENONET = 64
ENOPKG = 65
EREMOTE = 66
ENOLINK = 67
EADV = 68
ESRMNT = 69
ECOMM = 70
EPROTO = 71
EMULTIHOP = 72
EDOTDOT = 73
EBADMSG = 74
EOVERFLOW = 75
ENOTUNIQ = 76
EBADFD = 77
EREMCHG = 78
ELIBACC = 79
ELIBBAD = 80
ELIBSCN = 81
ELIBMAX = 82
ELIBEXEC = 83
EILSEQ = 84
ERESTART = 85
ESTRPIPE = 86
EUSERS = 87
ENOTSOCK = 88
EDESTADDRREQ = 89
EMSGSIZE = 90
EPROTOTYPE = 91
ENOPROTOOPT = 92
EPROTONOSUPPORT = 93
ESOCKTNOSUPPORT = 94
ENOTSUP = 95
EPFNOSUPPORT = 96
EAFNOSUPPORT = 97
EADDRINUSE = 98
EADDRNOTAVAIL = 99
ENETDOWN = 100
ENETUNREACH = 101
ENETRESET = 102
ECONNABORTED = 103
ECONNRESET = 104
ENOBUFS = 105
EISCONN = 106
ENOTCONN = 107
ESHUTDOWN = 108
ETOOMANYREFS = 109
ETIMEDOUT = 110
ECONNREFUSED = 111
EHOSTDOWN = 112
EHOSTUNREACH = 113
EALREADY = 114
EINPROGRESS = 115
ESTALE = 116
EUCLEAN = 117
ENOTNAM = 118
ENAVAIL = 119
EISNAM = 120
EREMOTEIO = 121
EDQUOT = 122
ENOMEDIUM = 123
EMEDIUMTYPE = 124
ECANCELED = 125
ENOKEY = 126
EKEYEXPIRED = 127
EKEYREVOKED = 128
EKEYREJECTED = 129
EOWNERDEAD = 130
ENOTRECOVERABLE = 131
ERFKILL = 132

# Reverse mapping from numeric code to symbol name, e.g.
# errorcode[2] == 'ENOENT'.  Derived from the constants above so the
# two views can never drift apart (the original module duplicated every
# entry in a hand-written dict literal).
errorcode = {
    value: name
    for name, value in list(globals().items())
    if name.startswith('E') and isinstance(value, int)
}
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.