| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
Djabbz/wakatime | wakatime/packages/pygments_py3/pygments/lexers/dsls.py | 72 | 18768 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer']
class ProtoBufLexer(RegexLexer):
"""
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
.. versionadded:: 1.4
"""
name = 'Protocol Buffer'
aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
'root': [
(r'[ \t]+', Text),
(r'[,;{}\[\]()]', Punctuation),
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(words((
'import', 'option', 'optional', 'required', 'repeated', 'default',
'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns',
'oneof'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'),
(r'(message|extend)(\s+)',
bygroups(Keyword.Declaration, Text), 'message'),
(r'(enum|group|service)(\s+)',
bygroups(Keyword.Declaration, Text), 'type'),
(r'\".*?\"', String),
(r'\'.*?\'', String),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'(\-?(inf|nan))\b', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[+-=]', Operator),
(r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
bygroups(Name.Attribute, Text, Operator)),
            (r'[a-zA-Z_][\w.]*', Name),
],
'package': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'message': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'type': [
(r'[a-zA-Z_]\w*', Name, '#pop'),
default('#pop'),
],
}
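# Illustrative usage sketch (added for this write-up, not part of the
# upstream module): run a small .proto fragment through the lexer. The
# message definition below is invented for demonstration.
def _demo_protobuf_lexer():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    source = (
        'message SearchRequest {\n'
        '  required string query = 1;\n'
        '  optional int32 page_number = 2;\n'
        '}\n'
    )
    return highlight(source, ProtoBufLexer(), TerminalFormatter())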
class BroLexer(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
.. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
filenames = ['*.bro']
_hex = r'[0-9a-fA-F_]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
# Whitespace
(r'^@.*?\n', Comment.Preproc),
(r'#.*?\n', Comment.Single),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
r'|export|for|function|if|global|hook|local|module|next'
r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
(r'(T|F)\b', Keyword.Constant),
(r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
r'|default|disable_print_hook|raw_output|encrypt|group|log'
r'|mergeable|optional|persistent|priority|redef'
r'|rotate_(?:interval|size)|synchronized)\b',
bygroups(Punctuation, Keyword)),
(r'\s+module\b', Keyword.Namespace),
# Addresses, ports and networks
(r'\d+/(tcp|udp|icmp|unknown)\b', Number),
(r'(\d+\.){3}\d+', Number),
(r'(' + _hex + r'){7}' + _hex, Number),
(r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
(r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
(r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
# Numeric
(_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
(r'0[xX]' + _hex, Number.Hex),
(_float, Number.Float),
(r'\d+', Number.Integer),
(r'/', String.Regex, 'regex'),
(r'"', String, 'string'),
# Operators
(r'[!%*/+:<=>?~|-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|match)\b', Operator.Word),
(r'[{}()\[\]$.,;]', Punctuation),
            # Identifier
(r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String)
],
'regex': [
(r'/', String.Regex, '#pop'),
(r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
(r'[^\\/\n]+', String.Regex),
(r'\\\n', String.Regex),
(r'\\', String.Regex)
]
}
class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
.. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
include('comments'),
include('keywords'),
include('names'),
include('numbers'),
include('operators'),
include('strings'),
(r'[]{}:(),;[]', Punctuation),
(r'[^\S\n]+', Text),
],
'comments': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'operators': [
(r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
(r'(in|and|or|not)\b', Operator.Word),
],
'names': [
            (r'[a-zA-Z_]\w*', Name.Attribute),
(r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
String, Punctuation)),
(r'\$\S+', Name.Variable),
],
'numbers': [
# Copypasta from the Python lexer
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'keywords': [
            # 'group' and 'require' are left out since they're often
            # used as attributes.
(words((
'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
'check', 'class', 'computer', 'configured', 'contained',
'create_resources', 'crit', 'cron', 'debug', 'default',
'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
'err', 'exec', 'extlookup', 'fail', 'false', 'file',
'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
'include', 'info', 'inherits', 'inline_template', 'installed',
'interface', 'k5login', 'latest', 'link', 'loglevel',
'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
'mount', 'mounted', 'nagios_command', 'nagios_contact',
'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
'realize', 'regsubst', 'resources', 'role', 'router', 'running',
'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
'service', 'sha1', 'shellquote', 'split', 'sprintf',
'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
'zpool'), prefix='(?i)', suffix=r'\b'),
Keyword),
],
'strings': [
(r'"([^"])*"', String),
(r"'(\\'|[^'])*'", String),
],
}
class RslLexer(RegexLexer):
"""
    `RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
    language used in the RAISE (Rigorous Approach to Industrial Software
    Engineering) method.
.. versionadded:: 2.0
"""
name = 'RSL'
aliases = ['rsl']
filenames = ['*.rsl']
mimetypes = ['text/rsl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(words((
'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
'type', 'union', 'until', 'use', 'value', 'variable', 'while',
'with', 'write', '~isin', '-inflist', '-infset', '-list',
'-set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'(variable|value)\b', Keyword.Declaration),
(r'--.*?\n', Comment),
(r'<:.*?:>', Comment),
(r'\{!.*?!\}', Comment),
(r'/\*.*?\*/', Comment),
(r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function),
(r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)',
bygroups(Text, Name.Function, Text, Keyword)),
(r'\b[A-Z]\w*\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'".*"', String),
(r'\'.\'', String.Char),
(r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
Operator),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'.', Text),
],
}
def analyse_text(text):
"""
        Check for the most common text at the beginning of an RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0
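# Minimal sketch (added for illustration) of how Pygments consumes
# analyse_text: guess_lexer scores every candidate lexer and returns an
# instance of the best match. The RSL fragment below is hypothetical.
def _demo_rsl_detection():
    from pygments.lexers import guess_lexer
    sample = 'scheme DATABASE = class type Database end'
    return isinstance(guess_lexer(sample), RslLexer)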
class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
.. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
filenames = ['*.msc']
_var = r'(\w+|"(?:\\"|[^"])*")'
tokens = {
'root': [
(r'msc\b', Keyword.Type),
# Options
(r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
r'|arcgradient|ARCGRADIENT)\b', Name.Property),
# Operators
(r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
(r'(\.|-|\|){3}', Keyword),
(r'(?:-|=|\.|:){2}'
r'|<<=>>|<->|<=>|<<>>|<:>'
r'|->|=>>|>>|=>|:>|-x|-X'
r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
# Names
(r'\*', Name.Builtin),
(_var, Name.Variable),
# Other
(r'\[', Punctuation, 'attrs'),
(r'\{|\}|,|;', Punctuation),
include('comments')
],
'attrs': [
(r'\]', Punctuation, '#pop'),
(_var + r'(\s*)(=)(\s*)' + _var,
bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace,
String)),
(r',', Punctuation),
include('comments')
],
'comments': [
(r'(?://|#).*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'[ \t\r\n]+', Text.Whitespace)
]
}
class VGLLexer(RegexLexer):
"""
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
.. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
            (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
             r'|create|on|line|with|global|routine|value|endroutine|constant'
             r'|set|join|library|compile_option|file|exists|copy'
             r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Text),
(r'\s+', Text)
]
}
class AlloyLexer(RegexLexer):
"""
For `Alloy <http://alloy.mit.edu>`_ source code.
.. versionadded:: 2.0
"""
name = 'Alloy'
aliases = ['alloy']
filenames = ['*.als']
mimetypes = ['text/x-alloy']
flags = re.MULTILINE | re.DOTALL
iden_rex = r'[a-zA-Z_][\w\']*'
text_tuple = (r'[^\S\n]+', Text)
tokens = {
'sig': [
(r'(extends)\b', Keyword, '#pop'),
(iden_rex, Name),
text_tuple,
(r',', Punctuation),
(r'\{', Operator, '#pop'),
],
'module': [
text_tuple,
(iden_rex, Name, '#pop'),
],
'fun': [
text_tuple,
(r'\{', Operator, '#pop'),
(iden_rex, Name, '#pop'),
],
'root': [
(r'--.*?$', Comment.Single),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
text_tuple,
(r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text),
'module'),
(r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'),
(r'(iden|univ|none)\b', Keyword.Constant),
(r'(int|Int)\b', Keyword.Type),
(r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
(r'(all|some|no|sum|disj|when|else)\b', Keyword),
(r'(run|check|for|but|exactly|expect|as)\b', Keyword),
(r'(and|or|implies|iff|in)\b', Operator.Word),
(r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'),
(r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
(r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
(iden_rex, Name),
(r'[:,]', Punctuation),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\n', Text),
]
}
class PanLexer(RegexLexer):
"""
Lexer for `pan <http://github.com/quattor/pan/>`_ source files.
    Based on the tcsh lexer.
.. versionadded:: 2.0
"""
name = 'Pan'
aliases = ['pan']
filenames = ['*.pan']
tokens = {
'root': [
include('basic'),
(r'\(', Keyword, 'paren'),
(r'\{', Keyword, 'curly'),
include('data'),
],
'basic': [
(words((
'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final', 'prefix',
'unique', 'object', 'foreach', 'include', 'template', 'function', 'variable',
'structure', 'extensible', 'declaration'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
(words((
'file_contents', 'format', 'index', 'length', 'match', 'matches', 'replace',
'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase', 'debug', 'error',
'traceback', 'deprecated', 'base64_decode', 'base64_encode', 'digest', 'escape',
'unescape', 'append', 'create', 'first', 'nlist', 'key', 'list', 'merge', 'next',
'prepend', 'is_boolean', 'is_defined', 'is_double', 'is_list', 'is_long',
'is_nlist', 'is_null', 'is_number', 'is_property', 'is_resource', 'is_string',
'to_boolean', 'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
'path_exists', 'if_exists', 'return', 'value'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
}
| bsd-3-clause |
Sweetgrassbuffalo/ReactionSweeGrass-v2 | .meteor/local/dev_bundle/python/Tools/webchecker/wcgui.py | 42 | 15090 | #! /usr/bin/env python
"""GUI interface to webchecker.
This works as a Grail applet too! E.g.
<APPLET CODE=wcgui.py NAME=CheckerWindow></APPLET>
Checkpoints are not (yet??? ever???) supported.
User interface:
Enter a root to check in the text entry box. To enter more than one root,
enter them one at a time and press <Return> for each one.
Command buttons Start, Stop and "Check one" govern the checking process in
the obvious way. Start and "Check one" also enter the root from the text
entry box if one is present. There's also a check box (enabled by default)
to decide whether actually to follow external links (since this can slow
the checking down considerably). Finally there's a Quit button.
A series of checkbuttons determines whether the corresponding output panel
is shown. List panels are also automatically shown or hidden when their
status changes between empty and non-empty. There are six panels:
Log -- raw output from the checker (-v, -q affect this)
To check -- links discovered but not yet checked
Checked -- links that have been checked
Bad links -- links that failed upon checking
Errors -- pages containing at least one bad link
Details -- details about one URL; double click on a URL in any of
the above list panels (not in Log) will show details
for that URL
Use your window manager's Close command to quit.
Command line options:
-m bytes -- skip HTML pages larger than this size (default %(MAXPAGE)d)
-q -- quiet operation (also suppresses external links report)
-v -- verbose operation; repeating -v will increase verbosity
-t root -- specify root dir which should be treated as internal (can repeat)
-a -- don't check name anchors
Command line arguments:
rooturl -- URL to start checking
(default %(DEFROOT)s)
XXX The command line options (-m, -q, -v) should be GUI accessible.
XXX The roots should be visible as a list (?).
XXX The multipanel user interface is clumsy.
"""
# ' Emacs bait
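# Example invocations (illustrative; the URLs are placeholders):
#
#   python wcgui.py
#   python wcgui.py -m 100000 -t http://example.com/private/ http://example.com/
#
# Roots passed on the command line are pre-loaded; more roots can be typed
# into the entry box once the window is up.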
import sys
import getopt
from Tkinter import *
import tktools
import webchecker
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 't:m:qva')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print __doc__%vars(webchecker)
sys.exit(2)
webchecker.verbose = webchecker.VERBOSE
webchecker.nonames = webchecker.NONAMES
webchecker.maxpage = webchecker.MAXPAGE
extra_roots = []
for o, a in opts:
if o == '-m':
webchecker.maxpage = int(a)
if o == '-q':
webchecker.verbose = 0
if o == '-v':
webchecker.verbose = webchecker.verbose + 1
if o == '-t':
extra_roots.append(a)
if o == '-a':
webchecker.nonames = not webchecker.nonames
root = Tk(className='Webchecker')
root.protocol("WM_DELETE_WINDOW", root.quit)
c = CheckerWindow(root)
c.setflags(verbose=webchecker.verbose, maxpage=webchecker.maxpage,
nonames=webchecker.nonames)
if args:
for arg in args[:-1]:
c.addroot(arg)
c.suggestroot(args[-1])
# Usually conditioned on whether external links
# will be checked, but since that's not a command
# line option, just toss them in.
for url_root in extra_roots:
# Make sure it's terminated by a slash,
# so that addroot doesn't discard the last
# directory component.
if url_root[-1] != "/":
url_root = url_root + "/"
c.addroot(url_root, add_to_do = 0)
root.mainloop()
class CheckerWindow(webchecker.Checker):
def __init__(self, parent, root=webchecker.DEFROOT):
self.__parent = parent
self.__topcontrols = Frame(parent)
self.__topcontrols.pack(side=TOP, fill=X)
self.__label = Label(self.__topcontrols, text="Root URL:")
self.__label.pack(side=LEFT)
self.__rootentry = Entry(self.__topcontrols, width=60)
self.__rootentry.pack(side=LEFT)
self.__rootentry.bind('<Return>', self.enterroot)
self.__rootentry.focus_set()
self.__controls = Frame(parent)
self.__controls.pack(side=TOP, fill=X)
self.__running = 0
self.__start = Button(self.__controls, text="Run", command=self.start)
self.__start.pack(side=LEFT)
self.__stop = Button(self.__controls, text="Stop", command=self.stop,
state=DISABLED)
self.__stop.pack(side=LEFT)
self.__step = Button(self.__controls, text="Check one",
command=self.step)
self.__step.pack(side=LEFT)
self.__cv = BooleanVar(parent)
self.__cv.set(self.checkext)
self.__checkext = Checkbutton(self.__controls, variable=self.__cv,
command=self.update_checkext,
text="Check nonlocal links",)
self.__checkext.pack(side=LEFT)
self.__reset = Button(self.__controls, text="Start over", command=self.reset)
self.__reset.pack(side=LEFT)
if __name__ == '__main__': # No Quit button under Grail!
self.__quit = Button(self.__controls, text="Quit",
command=self.__parent.quit)
self.__quit.pack(side=RIGHT)
self.__status = Label(parent, text="Status: initial", anchor=W)
self.__status.pack(side=TOP, fill=X)
self.__checking = Label(parent, text="Idle", anchor=W)
self.__checking.pack(side=TOP, fill=X)
self.__mp = mp = MultiPanel(parent)
sys.stdout = self.__log = LogPanel(mp, "Log")
self.__todo = ListPanel(mp, "To check", self, self.showinfo)
self.__done = ListPanel(mp, "Checked", self, self.showinfo)
self.__bad = ListPanel(mp, "Bad links", self, self.showinfo)
self.__errors = ListPanel(mp, "Pages w/ bad links", self, self.showinfo)
self.__details = LogPanel(mp, "Details")
self.root_seed = None
webchecker.Checker.__init__(self)
if root:
root = str(root).strip()
if root:
self.suggestroot(root)
self.newstatus()
def reset(self):
webchecker.Checker.reset(self)
for p in self.__todo, self.__done, self.__bad, self.__errors:
p.clear()
if self.root_seed:
self.suggestroot(self.root_seed)
def suggestroot(self, root):
self.__rootentry.delete(0, END)
self.__rootentry.insert(END, root)
self.__rootentry.select_range(0, END)
self.root_seed = root
def enterroot(self, event=None):
root = self.__rootentry.get()
root = root.strip()
if root:
self.__checking.config(text="Adding root "+root)
self.__checking.update_idletasks()
self.addroot(root)
self.__checking.config(text="Idle")
try:
i = self.__todo.items.index(root)
except (ValueError, IndexError):
pass
else:
self.__todo.list.select_clear(0, END)
self.__todo.list.select_set(i)
self.__todo.list.yview(i)
self.__rootentry.delete(0, END)
def start(self):
self.__start.config(state=DISABLED, relief=SUNKEN)
self.__stop.config(state=NORMAL)
self.__step.config(state=DISABLED)
self.enterroot()
self.__running = 1
self.go()
def stop(self):
self.__stop.config(state=DISABLED, relief=SUNKEN)
self.__running = 0
def step(self):
self.__start.config(state=DISABLED)
self.__step.config(state=DISABLED, relief=SUNKEN)
self.enterroot()
self.__running = 0
self.dosomething()
def go(self):
if self.__running:
self.__parent.after_idle(self.dosomething)
else:
self.__checking.config(text="Idle")
self.__start.config(state=NORMAL, relief=RAISED)
self.__stop.config(state=DISABLED, relief=RAISED)
self.__step.config(state=NORMAL, relief=RAISED)
__busy = 0
def dosomething(self):
if self.__busy: return
self.__busy = 1
if self.todo:
l = self.__todo.selectedindices()
if l:
i = l[0]
else:
i = 0
self.__todo.list.select_set(i)
self.__todo.list.yview(i)
url = self.__todo.items[i]
self.__checking.config(text="Checking "+self.format_url(url))
self.__parent.update()
self.dopage(url)
else:
self.stop()
self.__busy = 0
self.go()
def showinfo(self, url):
d = self.__details
d.clear()
d.put("URL: %s\n" % self.format_url(url))
if self.bad.has_key(url):
d.put("Error: %s\n" % str(self.bad[url]))
if url in self.roots:
d.put("Note: This is a root URL\n")
if self.done.has_key(url):
d.put("Status: checked\n")
o = self.done[url]
elif self.todo.has_key(url):
d.put("Status: to check\n")
o = self.todo[url]
else:
d.put("Status: unknown (!)\n")
o = []
if (not url[1]) and self.errors.has_key(url[0]):
d.put("Bad links from this page:\n")
for triple in self.errors[url[0]]:
link, rawlink, msg = triple
d.put(" HREF %s" % self.format_url(link))
if self.format_url(link) != rawlink: d.put(" (%s)" %rawlink)
d.put("\n")
d.put(" error %s\n" % str(msg))
self.__mp.showpanel("Details")
for source, rawlink in o:
d.put("Origin: %s" % source)
if rawlink != self.format_url(url):
d.put(" (%s)" % rawlink)
d.put("\n")
d.text.yview("1.0")
def setbad(self, url, msg):
webchecker.Checker.setbad(self, url, msg)
self.__bad.insert(url)
self.newstatus()
def setgood(self, url):
webchecker.Checker.setgood(self, url)
self.__bad.remove(url)
self.newstatus()
def newlink(self, url, origin):
webchecker.Checker.newlink(self, url, origin)
if self.done.has_key(url):
self.__done.insert(url)
elif self.todo.has_key(url):
self.__todo.insert(url)
self.newstatus()
def markdone(self, url):
webchecker.Checker.markdone(self, url)
self.__done.insert(url)
self.__todo.remove(url)
self.newstatus()
def seterror(self, url, triple):
webchecker.Checker.seterror(self, url, triple)
self.__errors.insert((url, ''))
self.newstatus()
def newstatus(self):
self.__status.config(text="Status: "+self.status())
self.__parent.update()
def update_checkext(self):
self.checkext = self.__cv.get()
class ListPanel:
def __init__(self, mp, name, checker, showinfo=None):
self.mp = mp
self.name = name
self.showinfo = showinfo
self.checker = checker
self.panel = mp.addpanel(name)
self.list, self.frame = tktools.make_list_box(
self.panel, width=60, height=5)
self.list.config(exportselection=0)
if showinfo:
self.list.bind('<Double-Button-1>', self.doubleclick)
self.items = []
def clear(self):
self.items = []
self.list.delete(0, END)
self.mp.hidepanel(self.name)
def doubleclick(self, event):
l = self.selectedindices()
if l:
self.showinfo(self.items[l[0]])
def selectedindices(self):
l = self.list.curselection()
if not l: return []
return map(int, l)
def insert(self, url):
if url not in self.items:
if not self.items:
self.mp.showpanel(self.name)
# (I tried sorting alphabetically, but the display is too jumpy)
i = len(self.items)
self.list.insert(i, self.checker.format_url(url))
self.list.yview(i)
self.items.insert(i, url)
def remove(self, url):
try:
i = self.items.index(url)
except (ValueError, IndexError):
pass
else:
was_selected = i in self.selectedindices()
self.list.delete(i)
del self.items[i]
if not self.items:
self.mp.hidepanel(self.name)
elif was_selected:
if i >= len(self.items):
i = len(self.items) - 1
self.list.select_set(i)
class LogPanel:
def __init__(self, mp, name):
self.mp = mp
self.name = name
self.panel = mp.addpanel(name)
self.text, self.frame = tktools.make_text_box(self.panel, height=10)
self.text.config(wrap=NONE)
def clear(self):
self.text.delete("1.0", END)
self.text.yview("1.0")
def put(self, s):
self.text.insert(END, s)
if '\n' in s:
self.text.yview(END)
def write(self, s):
self.text.insert(END, s)
if '\n' in s:
self.text.yview(END)
self.panel.update()
class MultiPanel:
def __init__(self, parent):
self.parent = parent
self.frame = Frame(self.parent)
self.frame.pack(expand=1, fill=BOTH)
self.topframe = Frame(self.frame, borderwidth=2, relief=RAISED)
self.topframe.pack(fill=X)
self.botframe = Frame(self.frame)
self.botframe.pack(expand=1, fill=BOTH)
self.panelnames = []
self.panels = {}
def addpanel(self, name, on=0):
v = StringVar(self.parent)
if on:
v.set(name)
else:
v.set("")
check = Checkbutton(self.topframe, text=name,
offvalue="", onvalue=name, variable=v,
command=self.checkpanel)
check.pack(side=LEFT)
panel = Frame(self.botframe)
label = Label(panel, text=name, borderwidth=2, relief=RAISED, anchor=W)
label.pack(side=TOP, fill=X)
t = v, check, panel
self.panelnames.append(name)
self.panels[name] = t
if on:
panel.pack(expand=1, fill=BOTH)
return panel
def showpanel(self, name):
v, check, panel = self.panels[name]
v.set(name)
panel.pack(expand=1, fill=BOTH)
def hidepanel(self, name):
v, check, panel = self.panels[name]
v.set("")
panel.pack_forget()
def checkpanel(self):
for name in self.panelnames:
v, check, panel = self.panels[name]
panel.pack_forget()
for name in self.panelnames:
v, check, panel = self.panels[name]
if v.get():
panel.pack(expand=1, fill=BOTH)
if __name__ == '__main__':
main()
| gpl-3.0 |
barnsnake351/neutron | neutron/agent/linux/polling.py | 30 | 2108 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import eventlet
from neutron.agent.common import base_polling
from neutron.agent.linux import ovsdb_monitor
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
@contextlib.contextmanager
def get_polling_manager(minimize_polling=False,
ovsdb_monitor_respawn_interval=(
constants.DEFAULT_OVSDBMON_RESPAWN)):
if minimize_polling:
pm = InterfacePollingMinimizer(
ovsdb_monitor_respawn_interval=ovsdb_monitor_respawn_interval)
pm.start()
else:
pm = base_polling.AlwaysPoll()
try:
yield pm
finally:
if minimize_polling:
pm.stop()
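# Illustrative consumer sketch (not part of the original module): an agent
# loop that only resynchronizes when the polling manager reports a change.
# `do_full_sync` is a hypothetical callback supplied by the caller.
def _demo_polling_loop(do_full_sync, iterations=10):
    with get_polling_manager(minimize_polling=True) as pm:
        for _ in range(iterations):
            if pm.is_polling_required:
                do_full_sync()
            eventlet.sleep(1)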
class InterfacePollingMinimizer(base_polling.BasePollingManager):
"""Monitors ovsdb to determine when polling is required."""
def __init__(
self,
ovsdb_monitor_respawn_interval=constants.DEFAULT_OVSDBMON_RESPAWN):
super(InterfacePollingMinimizer, self).__init__()
self._monitor = ovsdb_monitor.SimpleInterfaceMonitor(
respawn_interval=ovsdb_monitor_respawn_interval)
def start(self):
self._monitor.start()
def stop(self):
self._monitor.stop()
def _is_polling_required(self):
        # Yield briefly so the monitor has a chance to collect output
        # before we check for updates.
eventlet.sleep()
return self._monitor.has_updates
def get_events(self):
return self._monitor.get_events()
| apache-2.0 |
Nowis75/crazyflie-pc-client-leapmotion | setup.py | 4 | 2864 | #!/usr/bin/env python
from distutils.core import setup
import glob
import os
VERSION = '2013.4.99'  # Year.Month.fix; fix == 99 marks a dev version
try:
import py2exe
except ImportError:
print("Warning: py2exe not usable")
with open(os.path.join(os.path.dirname(__file__),
"lib",
"cfclient",
"version.py"), "w") as versionpy:
versionpy.write("VERSION='{}'".format(VERSION))
setup(name='cfclient',
      description='Bitcraze Crazyflie nano quadcopter client',
version=VERSION,
author='Bitcraze team',
author_email='contact@bitcraze.se',
url='http://www.bitcraze.se',
package_dir={'': 'lib'},
packages=['cfclient', 'cfclient.ui', 'cfclient.ui.tabs',
'cfclient.ui.toolboxes', 'cfclient.ui.widgets',
'cfclient.utils', 'cfclient.ui.dialogs', 'cflib',
'cflib.bootloader', 'cflib.crazyflie', 'cflib.drivers',
'cflib.utils', 'cflib.crtp'],
scripts=['bin/cfclient'],
# Py2exe specifics
console=['bin/cfclient'],
data_files=[('', ['README.md', 'LICENSE.txt']),
('cfclient/ui',
glob.glob('lib/cfclient/ui/*.ui')),
('cfclient/ui/tabs',
glob.glob('lib/cfclient/ui/tabs/*.ui')),
('cfclient/ui/widgets',
glob.glob('lib/cfclient/ui/widgets/*.ui')),
('cfclient/ui/toolboxes',
glob.glob('lib/cfclient/ui/toolboxes/*.ui')),
('cfclient/ui/dialogs',
glob.glob('lib/cfclient/ui/dialogs/*.ui')),
('cfclient/configs',
glob.glob('lib/cfclient/configs/*.json')),
('cflib/cache',
glob.glob('lib/cflib/cache/*.json')),
('cfclient/configs/input',
glob.glob('lib/cfclient/configs/input/*.json')),
('cfclient/configs/log',
glob.glob('lib/cfclient/configs/log/*.json'))],
options={"py2exe": {"includes": ["sip", "PyQt4",
"cfclient.ui.widgets",
"cflib.bootloader.cloader",
"cfclient.ui.toolboxes.*",
"cfclient.ui.*", "cfclient.ui.tabs.*",
"cfclient.ui.widgets.*",
"cfclient.ui.dialogs.*",
"pygame._view"],
"excludes": ["AppKit"],
"skip_archive": True}})
os.remove(os.path.join(os.path.dirname(__file__),
"lib",
"cfclient",
"version.py"))
| gpl-2.0 |
shusenl/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
    metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        that minimizes the sum of the distances (according to the metric) to
        all samples that belong to that particular class.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, which makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
            raise ValueError('y has fewer than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
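# Illustrative sketch (added for this write-up): the effect of
# shrink_threshold on synthetic data. Shrinking pulls per-class centroid
# deviations toward the overall centroid, zeroing out weak features.
def _demo_shrinkage():
    X = np.array([[1.0, 0.0], [1.1, 0.1], [5.0, 0.0], [5.1, 0.1]])
    y = np.array([0, 0, 1, 1])
    plain = NearestCentroid().fit(X, y)
    shrunk = NearestCentroid(shrink_threshold=0.5).fit(X, y)
    # The uninformative second feature is shrunk toward the dataset mean.
    return plain.centroids_, shrunk.centroids_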
| bsd-3-clause |
marc-sensenich/ansible | lib/ansible/module_utils/network/sros/sros.py | 86 | 4333 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without
# modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,
# this list of conditions and the following disclaimer in the
# documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
_DEVICE_CONFIGS = {}
sros_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
}
sros_argument_spec = {
'provider': dict(type='dict', options=sros_provider_spec),
}
sros_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'timeout': dict(removed_in_version=2.9, type='int'),
}
sros_argument_spec.update(sros_top_spec)
def check_args(module, warnings):
pass
def get_config(module, flags=None):
flags = [] if flags is None else flags
cmd = 'admin display-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
cfg = to_text(out, errors='surrogate_or_strict').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
responses.append(to_text(out, errors='surrogate_or_strict'))
return responses
def load_config(module, commands):
for command in to_list(commands):
rc, out, err = exec_command(module, command)
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
exec_command(module, 'exit all')
| gpl-3.0 |
diagramsoftware/odoo | openerp/addons/base/ir/ir_config_parameter.py | 30 | 4709 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Store database-specific configuration parameters
"""
import uuid
import datetime
import logging
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import misc, config, mute_logger
_logger = logging.getLogger(__name__)
"""
A dictionary holding some configuration parameters to be initialized when the database is created.
"""
_default_parameters = {
"database.uuid": lambda: (str(uuid.uuid1()), []),
"database.create_date": lambda: (datetime.datetime.now().strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT), ['base.group_user']),
"web.base.url": lambda: ("http://localhost:%s" % config.get('xmlrpc_port'), []),
}
class ir_config_parameter(osv.osv):
"""Per-database storage of configuration key-value pairs."""
_name = 'ir.config_parameter'
_rec_name = 'key'
_columns = {
'key': fields.char('Key', required=True, select=1),
'value': fields.text('Value', required=True),
'group_ids': fields.many2many('res.groups', 'ir_config_parameter_groups_rel', 'icp_id', 'group_id', string='Groups'),
}
_sql_constraints = [
('key_uniq', 'unique (key)', 'Key must be unique.')
]
@mute_logger('openerp.addons.base.ir.ir_config_parameter')
def init(self, cr, force=False):
"""
Initializes the parameters listed in _default_parameters.
It overrides existing parameters if force is ``True``.
"""
for key, func in _default_parameters.iteritems():
# force=True skips search and always performs the 'if' body (because ids=False)
ids = not force and self.search(cr, SUPERUSER_ID, [('key','=',key)])
if not ids:
value, groups = func()
self.set_param(cr, SUPERUSER_ID, key, value, groups=groups)
def get_param(self, cr, uid, key, default=False, context=None):
"""Retrieve the value for a given key.
:param string key: The key of the parameter value to retrieve.
:param string default: default value if parameter is missing.
:return: The value of the parameter, or ``default`` if it does not exist.
:rtype: string
"""
ids = self.search(cr, uid, [('key','=',key)], context=context)
if not ids:
return default
param = self.browse(cr, uid, ids[0], context=context)
value = param.value
return value
def set_param(self, cr, uid, key, value, groups=[], context=None):
"""Sets the value of a parameter.
:param string key: The key of the parameter value to set.
:param string value: The value to set.
        :param list of string groups: list of groups (xml ids allowed)
            that may read this key.
:return: the previous value of the parameter or False if it did
not exist.
:rtype: string
"""
ids = self.search(cr, uid, [('key','=',key)], context=context)
gids = []
for group_xml in groups:
res_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, group_xml)
if res_id:
gids.append((4, res_id))
else:
_logger.warning('Potential Security Issue: Group [%s] is not found.' % group_xml)
vals = {'value': value}
if gids:
vals.update(group_ids=gids)
if ids:
param = self.browse(cr, uid, ids[0], context=context)
old = param.value
self.write(cr, uid, ids, vals, context=context)
return old
else:
vals.update(key=key)
self.create(cr, uid, vals, context=context)
return False
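    def _example_roundtrip(self, cr, uid, context=None):
        """Hedged usage sketch added for illustration; not part of the
        original model. Shows a typical set/get round-trip; the key and
        the group restriction are hypothetical."""
        old = self.set_param(cr, uid, 'my_module.refresh_interval', '300',
                             groups=['base.group_system'], context=context)
        return old, self.get_param(cr, uid, 'my_module.refresh_interval',
                                   default='60', context=context)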
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Zhaoyanzhang/-myflasky | venv/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/array.py | 32 | 10320 | # postgresql/array.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import ischema_names
from ...sql import expression, operators
from ...sql.base import SchemaEventTarget
from ... import types as sqltypes
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
def Any(other, arrexpr, operator=operators.eq):
"""A synonym for the :meth:`.ARRAY.Comparator.any` method.
This method is legacy and is here for backwards-compatibility.
.. seealso::
:func:`.expression.any_`
"""
return arrexpr.any(other, operator)
def All(other, arrexpr, operator=operators.eq):
"""A synonym for the :meth:`.ARRAY.Comparator.all` method.
This method is legacy and is here for backwards-compatibility.
.. seealso::
:func:`.expression.all_`
"""
return arrexpr.all(other, operator)
class array(expression.Tuple):
"""A PostgreSQL ARRAY literal.
This is used to produce ARRAY literals in SQL expressions, e.g.::
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.dialects import postgresql
from sqlalchemy import select, func
stmt = select([
array([1,2]) + array([3,4,5])
])
print stmt.compile(dialect=postgresql.dialect())
Produces the SQL::
SELECT ARRAY[%(param_1)s, %(param_2)s] ||
ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1
An instance of :class:`.array` will always have the datatype
:class:`.ARRAY`. The "inner" type of the array is inferred from
the values present, unless the ``type_`` keyword argument is passed::
array(['foo', 'bar'], type_=CHAR)
.. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
See also:
:class:`.postgresql.ARRAY`
"""
__visit_name__ = 'array'
def __init__(self, clauses, **kw):
super(array, self).__init__(*clauses, **kw)
self.type = ARRAY(self.type)
def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
if _assume_scalar or operator is operators.getitem:
# if getitem->slice were called, Indexable produces
# a Slice object from that
assert isinstance(obj, int)
return expression.BindParameter(
None, obj, _compared_to_operator=operator,
type_=type_,
_compared_to_type=self.type, unique=True)
else:
return array([
self._bind_param(operator, o, _assume_scalar=True, type_=type_)
for o in obj])
def self_group(self, against=None):
if (against in (
operators.any_op, operators.all_op, operators.getitem)):
return expression.Grouping(self)
else:
return self
CONTAINS = operators.custom_op("@>", precedence=5)
CONTAINED_BY = operators.custom_op("<@", precedence=5)
OVERLAP = operators.custom_op("&&", precedence=5)
class ARRAY(SchemaEventTarget, sqltypes.ARRAY):
"""PostgreSQL ARRAY type.
.. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
a subclass of the core :class:`.types.ARRAY` type.
The :class:`.postgresql.ARRAY` type is constructed in the same way
as the core :class:`.types.ARRAY` type; a member type is required, and a
number of dimensions is recommended if the type is to be used for more
than one dimension::
from sqlalchemy.dialects import postgresql
mytable = Table("mytable", metadata,
Column("data", postgresql.ARRAY(Integer, dimensions=2))
)
The :class:`.postgresql.ARRAY` type provides all operations defined on the
core :class:`.types.ARRAY` type, including support for "dimensions", indexed
access, and simple matching such as :meth:`.types.ARRAY.Comparator.any`
    and :meth:`.types.ARRAY.Comparator.all`. The :class:`.postgresql.ARRAY` class also
provides PostgreSQL-specific methods for containment operations, including
:meth:`.postgresql.ARRAY.Comparator.contains`
:meth:`.postgresql.ARRAY.Comparator.contained_by`,
and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::
mytable.c.data.contains([1, 2])
The :class:`.postgresql.ARRAY` type may not be supported on all
PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.
Additionally, the :class:`.postgresql.ARRAY` type does not work directly in
conjunction with the :class:`.ENUM` type. For a workaround, see the
special type at :ref:`postgresql_array_of_enum`.
.. seealso::
:class:`.types.ARRAY` - base array type
:class:`.postgresql.array` - produces a literal array value.
"""
class Comparator(sqltypes.ARRAY.Comparator):
"""Define comparison operations for :class:`.ARRAY`.
Note that these operations are in addition to those provided
by the base :class:`.types.ARRAY.Comparator` class, including
:meth:`.types.ARRAY.Comparator.any` and
:meth:`.types.ARRAY.Comparator.all`.
"""
def contains(self, other, **kwargs):
"""Boolean expression. Test if elements are a superset of the
elements of the argument array expression.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if elements are a proper subset of the
elements of the argument array expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean)
def overlap(self, other):
"""Boolean expression. Test if array has elements in common with
an argument array expression.
"""
return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
comparator_factory = Comparator
def __init__(self, item_type, as_tuple=False, dimensions=None,
zero_indexes=False):
"""Construct an ARRAY.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. DBAPIs such
as psycopg2 return lists by default. When tuples are
returned, the results are hashable.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This will cause the DDL emitted for this
ARRAY to include the exact number of bracket clauses ``[]``,
and will also optimize the performance of the type overall.
Note that PG arrays are always implicitly "non-dimensioned",
meaning they can store any number of dimensions no matter how
they were declared.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and PostgreSQL one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
.. versionadded:: 0.9.5
"""
if isinstance(item_type, ARRAY):
raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype")
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
@property
def hashable(self):
return self.as_tuple
@property
def python_type(self):
return list
def compare_values(self, x, y):
return x == y
def _set_parent(self, column):
"""Support SchemaEventTarget"""
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent_with_dispatch(parent)
def _proc_array(self, arr, itemproc, dim, collection):
if dim is None:
arr = list(arr)
if dim == 1 or dim is None and (
# this has to be (list, tuple), or at least
# not hasattr('__iter__'), since Py3K strings
# etc. have __iter__
not arr or not isinstance(arr[0], (list, tuple))):
if itemproc:
return collection(itemproc(x) for x in arr)
else:
return collection(arr)
else:
return collection(
self._proc_array(
x, itemproc,
dim - 1 if dim is not None else None,
collection)
for x in arr
)
def bind_processor(self, dialect):
item_proc = self.item_type.dialect_impl(dialect).\
bind_processor(dialect)
def process(value):
if value is None:
return value
else:
return self._proc_array(
value,
item_proc,
self.dimensions,
list)
return process
def result_processor(self, dialect, coltype):
item_proc = self.item_type.dialect_impl(dialect).\
result_processor(dialect, coltype)
def process(value):
if value is None:
return value
else:
return self._proc_array(
value,
item_proc,
self.dimensions,
tuple if self.as_tuple else list)
return process
ischema_names['_array'] = ARRAY
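# Minimal compilation sketch (added for illustration), mirroring the
# docstring examples above: concatenate two array literals and render
# the PostgreSQL SQL for the resulting expression.
def _demo_array_concat():
    from sqlalchemy import select
    from sqlalchemy.dialects import postgresql
    stmt = select([array([1, 2]) + array([3, 4, 5])])
    return str(stmt.compile(dialect=postgresql.dialect()))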
| mit |
kanteshraj/ansible | lib/ansible/playbook/play_context.py | 8 | 17268 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pipes
import random
import re
import string
from six import iteritems, string_types
from six.moves import range
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.unicode import to_unicode
__all__ = ['PlayContext']
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
MAGIC_VARIABLE_MAPPING = dict(
connection = ('ansible_connection',),
remote_addr = ('ansible_ssh_host', 'ansible_host'),
remote_user = ('ansible_ssh_user', 'ansible_user'),
port = ('ansible_ssh_port', 'ansible_port'),
password = ('ansible_ssh_pass', 'ansible_password'),
private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'),
pipelining = ('ansible_ssh_pipelining', 'ansible_pipelining'),
shell = ('ansible_shell_type',),
become = ('ansible_become',),
become_method = ('ansible_become_method',),
become_user = ('ansible_become_user',),
become_pass = ('ansible_become_password','ansible_become_pass'),
become_exe = ('ansible_become_exe',),
become_flags = ('ansible_become_flags',),
sudo = ('ansible_sudo',),
sudo_user = ('ansible_sudo_user',),
sudo_pass = ('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe = ('ansible_sudo_exe',),
sudo_flags = ('ansible_sudo_flags',),
su = ('ansible_su',),
su_user = ('ansible_su_user',),
su_pass = ('ansible_su_password', 'ansible_su_pass'),
su_exe = ('ansible_su_exe',),
su_flags = ('ansible_su_flags',),
)
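# Illustrative helper (not part of the original file): how the mapping above
# resolves a PlayContext field from host variables, honoring alias order.
def _demo_resolve_magic_variable(field, variables):
    for var_name in MAGIC_VARIABLE_MAPPING.get(field, ()):
        if var_name in variables:
            return variables[var_name]
    return None
# e.g. _demo_resolve_magic_variable('remote_user', {'ansible_user': 'admin'})
# returns 'admin'.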
SU_PROMPT_LOCALIZATIONS = [
'Password',
'암호',
'パスワード',
'Adgangskode',
'Contraseña',
'Contrasenya',
'Hasło',
'Heslo',
'Jelszó',
'Lösenord',
'Mật khẩu',
'Mot de passe',
'Parola',
'Parool',
'Pasahitza',
'Passord',
'Passwort',
'Salasana',
'Sandi',
'Senha',
'Wachtwoord',
'ססמה',
'Лозинка',
'Парола',
'Пароль',
'गुप्तशब्द',
'शब्दकूट',
'సంకేతపదము',
'හස්පදය',
'密码',
'密碼',
]
TASK_ATTRIBUTE_OVERRIDES = (
'become',
'become_user',
'become_pass',
'become_method',
'connection',
'delegate_to',
'no_log',
'remote_user',
)
class PlayContext(Base):
'''
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
'''
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
_remote_addr = FieldAttribute(isa='string')
_password = FieldAttribute(isa='string')
_private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
_timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
_shell = FieldAttribute(isa='string')
_ssh_extra_args = FieldAttribute(isa='string')
_connection_lockfd= FieldAttribute(isa='int')
_pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_SSH_PIPELINING)
# privilege escalation fields
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
_become_exe = FieldAttribute(isa='string')
_become_flags = FieldAttribute(isa='string')
_prompt = FieldAttribute(isa='string')
# backwards compatibility fields for sudo/su
_sudo_exe = FieldAttribute(isa='string')
_sudo_flags = FieldAttribute(isa='string')
_sudo_pass = FieldAttribute(isa='string')
_su_exe = FieldAttribute(isa='string')
_su_flags = FieldAttribute(isa='string')
_su_pass = FieldAttribute(isa='string')
# general flags
_verbosity = FieldAttribute(isa='int', default=0)
_only_tags = FieldAttribute(isa='set', default=set())
_skip_tags = FieldAttribute(isa='set', default=set())
_check_mode = FieldAttribute(isa='bool', default=False)
_force_handlers = FieldAttribute(isa='bool', default=False)
_start_at_task = FieldAttribute(isa='string')
_step = FieldAttribute(isa='bool', default=False)
_diff = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None, options=None, passwords=None, connection_lockfd=None):
super(PlayContext, self).__init__()
if passwords is None:
passwords = {}
self.password = passwords.get('conn_pass','')
self.become_pass = passwords.get('become_pass','')
self.prompt = ''
self.success_key = ''
# a file descriptor to be used during locking operations
self.connection_lockfd = connection_lockfd
# set options before play to allow play to override them
if options:
self.set_options(options)
if play:
self.set_play(play)
def set_play(self, play):
'''
Configures this connection information instance with data from
the play class.
'''
if play.connection:
self.connection = play.connection
if play.remote_user:
self.remote_user = play.remote_user
if play.port:
self.port = int(play.port)
if play.become is not None:
self.become = play.become
if play.become_method:
self.become_method = play.become_method
if play.become_user:
self.become_user = play.become_user
# non connection related
self.no_log = play.no_log
if play.force_handlers is not None:
self.force_handlers = play.force_handlers
def set_options(self, options):
'''
Configures this connection information instance with data from
options specified by the user on the command line. These have a
lower precedence than those set on the play or host.
'''
if options.connection:
self.connection = options.connection
self.remote_user = options.remote_user
self.private_key_file = options.private_key_file
self.ssh_extra_args = options.ssh_extra_args
# privilege escalation
self.become = options.become
self.become_method = options.become_method
self.become_user = options.become_user
# general flags (should we move out?)
if options.verbosity:
self.verbosity = options.verbosity
#if options.no_log:
# self.no_log = boolean(options.no_log)
if options.check:
self.check_mode = boolean(options.check)
if hasattr(options, 'force_handlers') and options.force_handlers:
self.force_handlers = boolean(options.force_handlers)
if hasattr(options, 'step') and options.step:
self.step = boolean(options.step)
if hasattr(options, 'start_at_task') and options.start_at_task:
self.start_at_task = to_unicode(options.start_at_task)
if hasattr(options, 'diff') and options.diff:
self.diff = boolean(options.diff)
if hasattr(options, 'timeout') and options.timeout:
self.timeout = int(options.timeout)
# get the tag info from options, converting a comma-separated list
# of values into a proper list if need be. We check to see if the
# options have the attribute, as it is not always added via the CLI
if hasattr(options, 'tags'):
if isinstance(options.tags, list):
self.only_tags.update(options.tags)
elif isinstance(options.tags, string_types):
self.only_tags.update(options.tags.split(','))
if len(self.only_tags) == 0:
self.only_tags = set(['all'])
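        # e.g. options.tags == 'setup,deploy' becomes
        # self.only_tags == {'setup', 'deploy'}; with no tags supplied at all,
        # the fallback above leaves only_tags as {'all'}.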
if hasattr(options, 'skip_tags'):
if isinstance(options.skip_tags, list):
self.skip_tags.update(options.skip_tags)
elif isinstance(options.skip_tags, string_types):
self.skip_tags.update(options.skip_tags.split(','))
def set_task_and_variable_override(self, task, variables, templar):
'''
Sets attributes from the task if they are set, which will override
those from the play.
'''
new_info = self.copy()
# loop through a subset of attributes on the task object and set
# connection fields based on their values
for attr in TASK_ATTRIBUTE_OVERRIDES:
if hasattr(task, attr):
attr_val = getattr(task, attr)
if attr_val is not None:
setattr(new_info, attr, attr_val)
# next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
# connection info object with 'magic' variables from the variable list.
# If the value 'ansible_delegated_vars' is in the variables, it means
# we have a delegated-to host, so we check there first before looking
# at the variables in general
if task.delegate_to is not None:
# In the case of a loop, the delegated_to host may have been
# templated based on the loop variable, so we try and locate
# the host name in the delegated variable dictionary here
delegated_host_name = templar.template(task.delegate_to)
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
else:
delegated_vars = dict()
for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
for variable_name in variable_names:
if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
setattr(new_info, attr, delegated_vars[variable_name])
elif variable_name in variables:
setattr(new_info, attr, variables[variable_name])
# make sure we get port defaults if needed
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
new_info.port = int(C.DEFAULT_REMOTE_PORT)
# become legacy updates
if not new_info.become_pass:
if new_info.become_method == 'sudo' and new_info.sudo_pass:
setattr(new_info, 'become_pass', new_info.sudo_pass)
elif new_info.become_method == 'su' and new_info.su_pass:
setattr(new_info, 'become_pass', new_info.su_pass)
# finally, in the special instance that the task was specified
# as a local action, override the connection in case it was changed
# during some other step in the process
if task._local_action:
setattr(new_info, 'connection', 'local')
return new_info
def make_become_cmd(self, cmd, executable=None):
""" helper function to create privilege escalation commands """
prompt = None
success_key = None
self.prompt = None
if executable is None:
executable = C.DEFAULT_EXECUTABLE
if self.become:
becomecmd = None
randbits = ''.join(random.choice(string.ascii_lowercase) for x in range(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
# set executable to use for the privilege escalation method, with various overrides
exe = self.become_exe or \
getattr(self, '%s_exe' % self.become_method, None) or \
C.DEFAULT_BECOME_EXE or \
getattr(C, 'DEFAULT_%s_EXE' % self.become_method.upper(), None) or \
self.become_method
# set flags to use for the privilege escalation method, with various overrides
flags = self.become_flags or \
getattr(self, '%s_flags' % self.become_method, None) or \
C.DEFAULT_BECOME_FLAGS or \
getattr(C, 'DEFAULT_%s_FLAGS' % self.become_method.upper(), None) or \
''
if self.become_method == 'sudo':
# If we have a password, we run sudo with a randomly-generated
# prompt set using -p. Otherwise we run it with -n, which makes
# it fail if it would have prompted for a password.
#
# Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote()
# and pass the quoted string to the user's shell.
# force quick error if password is required but not supplied, should prevent sudo hangs.
if self.become_pass:
prompt = '[sudo via ansible, key=%s] password: ' % randbits
becomecmd = '%s %s -p "%s" -S -u %s %s -c %s' % (exe, flags, prompt, self.become_user, executable, success_cmd)
else:
becomecmd = '%s %s -n -S -u %s %s -c %s' % (exe, flags, self.become_user, executable, success_cmd)
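            # Illustrative expansion with a password set (key and command are
            # hypothetical, flags elided): becomecmd ends up as
            #   sudo -p "[sudo via ansible, key=abc] password: " -S -u root
            #   /bin/sh -c 'echo BECOME-SUCCESS-abc; /bin/ls'
            # and make_become_cmd returns it re-quoted as
            #   /bin/sh -c '<the quoted becomecmd>'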
elif self.become_method == 'su':
def detect_su_prompt(data):
SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
prompt = detect_su_prompt
becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)
elif self.become_method == 'pbrun':
                # 'assword' (no leading letter) matches both 'Password:' and 'password:'
                prompt = 'assword:'
becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd)
elif self.become_method == 'pfexec':
                # No user option, as pfexec uses its own exec_attr to figure it out
becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
elif self.become_method == 'runas':
raise AnsibleError("'runas' is not yet implemented")
#TODO: figure out prompt
                # not for use with the winrm plugin, but kept in case Windows ever gets native ssh
becomecmd = '%s %s /user:%s "%s"' % (exe, flags, self.become_user, success_cmd)
elif self.become_method == 'doas':
prompt = 'Password:'
exe = self.become_exe or 'doas'
if not self.become_pass:
flags += ' -n '
if self.become_user:
flags += ' -u %s ' % self.become_user
becomecmd = '%s %s echo %s && %s %s env ANSIBLE=true %s' % (exe, flags, success_key, exe, flags, cmd)
else:
raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
if self.become_pass:
self.prompt = prompt
self.success_key = success_key
return ('%s -c %s' % (executable, pipes.quote(becomecmd)))
return cmd
def update_vars(self, variables):
'''
        Adds 'magic' connection-related variables to the variable dictionary
        provided. This is a legacy from runner, kept in case users need to
        access these values from within the play.
'''
#FIXME: remove password? possibly add become/sudo settings
for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file', 'ansible_ssh_pipelining']:
if special_var not in variables:
for prop, varnames in MAGIC_VARIABLE_MAPPING.items():
if special_var in varnames:
variables[special_var] = getattr(self, prop)
| gpl-3.0 |
manipopopo/tensorflow | tensorflow/python/keras/layers/local.py | 10 | 18583 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locally-connected layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.LocallyConnected1D')
class LocallyConnected1D(Layer):
"""Locally-connected layer for 1D inputs.
The `LocallyConnected1D` layer works similarly to
the `Conv1D` layer, except that weights are unshared,
that is, a different set of filters is applied at each different patch
of the input.
Example:
```python
  # apply an unshared-weight 1D convolution of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(LocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: Currently only supports `"valid"` (case-insensitive).
`"same"` may be supported in the future.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, length)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(LocallyConnected1D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid':
raise ValueError('Invalid border mode for LocallyConnected1D '
'(only "valid" is supported): ' + padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=3)
@tf_utils.shape_type_conversion
def build(self, input_shape):
if self.data_format == 'channels_first':
input_dim, input_length = input_shape[1], input_shape[2]
else:
input_dim, input_length = input_shape[2], input_shape[1]
if input_dim is None:
raise ValueError('Axis 2 of input should be fully-defined. '
'Found shape:', input_shape)
self.output_length = conv_utils.conv_output_length(
input_length, self.kernel_size[0], self.padding, self.strides[0])
self.kernel_shape = (self.output_length, self.kernel_size[0] * input_dim,
self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.output_length, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
else:
self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
self.built = True
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
input_length = input_shape[2]
else:
input_length = input_shape[1]
length = conv_utils.conv_output_length(input_length, self.kernel_size[0],
self.padding, self.strides[0])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, length)
elif self.data_format == 'channels_last':
return (input_shape[0], length, self.filters)
def call(self, inputs):
output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides,
(self.output_length,), self.data_format)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(LocallyConnected1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.LocallyConnected2D')
class LocallyConnected2D(Layer):
"""Locally-connected layer for 2D inputs.
The `LocallyConnected2D` layer works similarly
to the `Conv2D` layer, except that weights are unshared,
that is, a different set of filters is applied at each
different patch of the input.
Examples:
```python
  # apply a 3x3 unshared-weights convolution with 64 output filters
  # on a 32x32 image with `data_format="channels_last"`:
model = Sequential()
model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
# now model.output_shape == (None, 30, 30, 64)
# notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64
  # parameters
# add a 3x3 unshared weights convolution on top, with 32 output filters:
model.add(LocallyConnected2D(32, (3, 3)))
# now model.output_shape == (None, 28, 28, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
    padding: Currently only supports `"valid"` (case-insensitive).
      `"same"` may be supported in the future.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(LocallyConnected2D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid':
raise ValueError('Invalid border mode for LocallyConnected2D '
'(only "valid" is supported): ' + padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=4)
@tf_utils.shape_type_conversion
def build(self, input_shape):
if self.data_format == 'channels_last':
input_row, input_col = input_shape[1:-1]
input_filter = input_shape[3]
else:
input_row, input_col = input_shape[2:]
input_filter = input_shape[1]
if input_row is None or input_col is None:
raise ValueError('The spatial dimensions of the inputs to '
' a LocallyConnected2D layer '
'should be fully-defined, but layer received '
'the inputs shape ' + str(input_shape))
output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
self.padding, self.strides[0])
output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
self.padding, self.strides[1])
self.output_row = output_row
self.output_col = output_col
self.kernel_shape = (
output_row * output_col,
self.kernel_size[0] * self.kernel_size[1] * input_filter, self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(output_row, output_col, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
else:
self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
self.built = True
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, self.filters)
def call(self, inputs):
output = K.local_conv(inputs, self.kernel, self.kernel_size, self.strides,
(self.output_row, self.output_col), self.data_format)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(LocallyConnected2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| apache-2.0 |
jcrudy/sklearntools | sklearntools/feature_selection.py | 1 | 15903 | import numpy as np
from sklearn.base import MetaEstimatorMixin, is_classifier, clone,\
TransformerMixin
from .sklearntools import STSimpleEstimator, _fit_and_score, DelegatingEstimator,\
BaseDelegatingEstimator, standard_methods
from sklearn.cross_validation import check_cv
from sklearn.metrics.scorer import check_scoring
from sklearn.externals.joblib.parallel import Parallel, delayed
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils import safe_mask
def weighted_average_score_combine(scores):
scores_arr = np.array([tup[:2] for tup in scores])
return np.average(scores_arr[:,0], weights=scores_arr[:,1])
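# Minimal sketch (hypothetical numbers): each tuple is assumed to carry
# (score, weight, ...) as produced by _fit_and_score, and the combiner returns
# the weighted mean of the scores:
#   weighted_average_score_combine([(0.8, 100, None), (0.6, 300, None)])
#   == (0.8 * 100 + 0.6 * 300) / 400 == 0.65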
def check_score_combiner(estimator, score_combiner):
if score_combiner is None:
return weighted_average_score_combine
else:
raise NotImplementedError('Score combiner %s not implemented' % str(score_combiner))
# TODO: Remove all CV stuff from this. Instead, rely on composition with CrossValidatingEstimator
class BaseFeatureImportanceEstimatorCV(BaseDelegatingEstimator):
def __init__(self, estimator, cv=None, scoring=None,
score_combiner=None, n_jobs=1, verbose=0, pre_dispatch='2*n_jobs'):
self.estimator = estimator
self.cv = cv
self.scoring = scoring
# self.check_constant_model = check_constant_model
self.score_combiner = score_combiner
self.n_jobs = n_jobs
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self._create_delegates('estimator', standard_methods)
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y=None, sample_weight=None, exposure=None):
cv = check_cv(self.cv, X=X, y=y, classifier=is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
combiner = check_score_combiner(self.estimator, self.score_combiner)
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch)
n_features = X.shape[1]
data = self._process_args(X=X, y=y, sample_weight=sample_weight, exposure=exposure)
feature_deletion_scores = []
# Get cross-validated scores with all features present
data_ = data.copy()
col_X = self._baseline_feature_subset(X, n_features)
        data_['X'] = col_X  # assign to the copy so 'data' keeps the original X
full_scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
train, test)
for train, test in cv)
self.score_ = combiner(full_scores)
# For each feature, remove that feature and get the cross-validation scores
for col in range(n_features):
col_X = self._feature_subset(X, n_features, col)
data_ = data.copy()
data_['X'] = col_X
scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
train, test)
for train, test in cv)
# test_features = np.ones(shape=n_features, dtype=bool)
# if col_X is not None:
# data_ = data.copy()
# data_['X'] = col_X
# scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
# train, test)
# for train, test in cv)
#
#
# if n_features > 1:
# test_features[col] = False
# data_['X'] = X[:, test_features]
# scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
# train, test)
# for train, test in cv)
# elif self.check_constant_model:
# # If there's only one feature to begin with, do the fitting and scoring on a
# # constant predictor.
# data_['X'] = np.ones(shape=(X.shape[0], 1))
# scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
# train, test)
# for train, test in cv)
# else:
# scores = full_scores
score = combiner(scores)
feature_deletion_scores.append(score)
# Higher scores are better. Higher feature importance means the feature is more important.
# This code reconciles these facts.
self.feature_importances_ = self._calc_importances(np.array(feature_deletion_scores), self.score_)
# Finally, fit on the full data set
self.estimator_ = clone(self.estimator).fit(**data)
# A fit method should always return self for chaining purposes
return self
def predict(self, X, *args, **kwargs):
return self.estimator_.predict(X, *args, **kwargs)
def score(self, X, y, sample_weight=None):
scorer = check_scoring(self.estimator, scoring=self.scoring)
return scorer(self, X, y, sample_weight)
class SingleEliminationFeatureImportanceEstimatorCV(BaseFeatureImportanceEstimatorCV):
def _calc_importances(self, scores, baseline_score):
return baseline_score - scores
def _baseline_feature_subset(self, X, n_features):
return X
def _feature_subset(self, X, n_features, col):
if n_features > 1:
mask = np.ones(shape=n_features, dtype=bool)
mask[col] = False
return X[:, mask]
else:
return np.ones(shape=(X.shape[0], 1))
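# Hypothetical usage sketch (any scikit-learn style estimator usable with
# _fit_and_score works; the names below are illustrative, not from this module):
#   imp = SingleEliminationFeatureImportanceEstimatorCV(some_regressor, cv=3)
#   imp.fit(X, y)
#   imp.feature_importances_   # baseline CV score minus score without feature i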
class UnivariateFeatureImportanceEstimatorCV(BaseFeatureImportanceEstimatorCV):
def _calc_importances(self, scores, baseline_score):
return scores
def _baseline_feature_subset(self, X, n_features):
return X
def _feature_subset(self, X, n_features, col):
mask = np.zeros(shape=n_features, dtype=bool)
mask[col] = True
return X[:, mask]
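# Contrast with the class above: SingleElimination scores X with column i
# removed (importance = baseline score - score without the feature), whereas
# Univariate scores column i alone, so each importance is simply the CV score
# of the corresponding single-feature model.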
class STSelector(STSimpleEstimator, SelectorMixin, MetaEstimatorMixin, TransformerMixin):
# Override transform method from SelectorMixin because it doesn't handle the
# case of selecting zero features the way I want it to.
def transform(self, X, exposure=None):
"""Reduce X to the selected features.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.
"""
mask = self.get_support()
if not mask.any():
return np.ones(shape=(X.shape[0], 1))
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return X[:, safe_mask(X, mask)]
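    # Deliberate deviation from the stock SelectorMixin.transform: an empty
    # support mask yields a single constant column of ones rather than an empty
    # matrix, presumably so downstream estimators can still fit an
    # intercept-only model.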
@if_delegate_has_method(delegate='estimator')
def predict(self, X, exposure=None):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
args = self._process_args(X=X, exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.predict(**args)
@if_delegate_has_method(delegate='estimator')
def score(self, X, y=None, sample_weight=None, exposure=None):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
args = self._process_args(X=X, y=y, sample_weight=sample_weight,
exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.score(**args)
def _get_support_mask(self):
return self.support_
@property
def _estimator_type(self):
return self.estimator._estimator_type
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X, exposure=None):
args = self._process_args(X=X, exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.decision_function(**args)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X, exposure=None):
args = self._process_args(X=X, exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.predict_proba(**args)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X, exposure=None):
args = self._process_args(X=X, exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.predict_log_proba(**args)
# class ForwardStepwiseFeatureSelector(STSelector, MetaEstimatorMixin):
# def __init__(self, estimator, scoring=None, check_constant_model=True):
# self.estimator = estimator
# self.scoring = scoring
# self.check_constant_model = check_constant_model
#
# def fit(self, X, y, sample_weight=None, exposure=None):
# scorer = check_scoring(self.estimator, scoring=self.scoring)
# n_features = 0 if self.check_constant_model else 1
# args = self._process_args(X=X, y=y, sample_weight=sample_weight,
# exposure=exposure)
#
# support = np.zeros(shape=n_features, dtype=bool)
# best_score = -float('inf')
# best_support = None
# best_n_features = None
# sequence = []
# scores = []
#
# if self.check_constant_model:
# args_ = args.copy()
# args_['X'] = np.ones(shape=(X.shape[0],1))
# # Fit the estimator
# estimator = clone(self.estimator).fit(**args)
#
# # Score the estimator
# if self.scoring is None and hasattr(estimator, 'score_'):
# score = estimator.score_
# else:
# score = scorer(estimator, X, y, sample_weight)
#
# # Compare to previous tries
# if score > best_score:
# best_score = score
# best_support = np.zeros_like(support)
# best_n_features = 0
# scores.append(score)
#
# max_features = X.shape[1]
# while np.sum(support) <= max_features:
# args_ = args.copy()
# args_['X'] = X[:, support]
#
# # Fit the estimator
# estimator = clone(self.estimator).fit(**args)
#
# # Score the estimator
# if self.scoring is None and hasattr(estimator, 'score_'):
# score = estimator.score_
# else:
# score = scorer(estimator, X, y, sample_weight)
# scores.append(score)
#
# # Compare to previous tries
# if score > best_score:
# best_score = score
# best_support = support.copy()
# best_n_features = np.sum(support)
#
# # Remove the least important feature from the support for next time
# best_feature = np.argmax(estimator.feature_importances_)
# best_feature_idx = np.argwhere(support)[best_feature][0]
# support[best_feature] = True
# sequence.append(best_feature_idx)
#
class BestKFeatureSelector(STSelector):
def __init__(self, estimator, k):
self.estimator = estimator
self.k = k
def fit(self, X, y=None, sample_weight=None, exposure=None):
args = self._process_args(X=X, y=y, sample_weight=sample_weight,
exposure=exposure)
self.estimator_ = clone(self.estimator).fit(**args)
k_best = np.argsort(self.estimator_.feature_importances_)[::-1][:self.k]
self.support_ = np.zeros(shape=X.shape[1], dtype=bool)
self.support_[k_best] = True
return self
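# Hypothetical usage sketch: the wrapped estimator must expose
# feature_importances_ after fitting (e.g. one of the importance estimators
# above); the k highest-importance features are kept:
#   selector = BestKFeatureSelector(
#       UnivariateFeatureImportanceEstimatorCV(some_estimator), k=5)
#   selector.fit(X, y).transform(X)   # -> X restricted to the 5 best columns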
class BackwardEliminationEstimator(STSelector):
def __init__(self, estimator, scoring=None, check_constant_model=True):
self.estimator = estimator
self.scoring = scoring
self.check_constant_model = check_constant_model
def fit(self, X, y=None, sample_weight=None, exposure=None):
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
# sample_weight = kwargs.get('sample_weight', None)
# Do stepwise backward elimination to find best feature set
support = np.ones(shape=n_features, dtype=bool)
best_score = -float('inf')
best_support = None
best_n_features = None
elimination = []
scores = []
fit_args = self._process_args(X=X, y=y, sample_weight=sample_weight,
exposure=exposure)
while np.sum(support) >= 1:
# Fit the estimator
args = fit_args.copy()
args['X'] = X[:, support]
estimator = clone(self.estimator).fit(**args)
# Score the estimator
if self.scoring is None and hasattr(estimator, 'score_'):
score = estimator.score_
else:
score = scorer(estimator, **args)
scores.append(score)
# Compare to previous tries
if score > best_score:
best_score = score
best_support = support.copy()
best_n_features = np.sum(support)
# Remove the least important feature from the support for next time
worst_feature = np.argmin(estimator.feature_importances_)
worst_feature_idx = np.argwhere(support)[worst_feature][0]
support[worst_feature_idx] = False
elimination.append(worst_feature_idx)
# Score a constant input model in case it's the best choice.
# (This would mean the predictors are essentially useless.)
if self.check_constant_model:
# Fit the estimator
args = fit_args.copy()
args['X'] = np.ones(shape=(X.shape[0],1))
estimator = clone(self.estimator).fit(**args)
# Score the estimator
if self.scoring is None and hasattr(estimator, 'score_'):
score = estimator.score_
else:
score = scorer(estimator, **args)
# Compare to previous tries
if score > best_score:
best_score = score
best_support = np.zeros_like(support)
best_n_features = 0
scores.append(score)
# Set attributes for best feature set
self.n_input_features_ = n_features
self.n_features_ = best_n_features
self.support_ = best_support
self.elimination_sequence_ = np.array(elimination)
self.scores_ = np.array(scores)
# Finally, fit on the full data set with the selected set of features
args = fit_args.copy()
args['X'] = X[:, self.support_]
self.estimator_ = clone(self.estimator).fit(**args)
return self
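# Hypothetical usage sketch (again assuming the wrapped estimator exposes
# feature_importances_ after fitting):
#   selector = BackwardEliminationEstimator(
#       SingleEliminationFeatureImportanceEstimatorCV(some_estimator))
#   selector.fit(X, y)
#   selector.support_                # mask of the best-scoring feature subset
#   selector.elimination_sequence_   # order in which features were dropped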
| bsd-3-clause |
haipersist/webspider | spider/jobspider/pipelines/down_image.py | 1 | 1075 | __author__ = 'BJHaibo'
import os
import scrapy
# from scrapy.spider import Request
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
class MyImagePipeline(ImagesPipeline):
    def __init__(self, store_uri, download_func=None):
        # store_uri is populated automatically from settings.py: IMAGES_STORE
        self.store_path = store_uri
        super(MyImagePipeline, self).__init__(store_uri, download_func=download_func)
def get_media_requests(self, item, info):
url = item.get('url',None)
if url is not None:
yield scrapy.Request(url)
    # Called by Scrapy once all image requests for the item have finished downloading
def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if image_paths:
            image_path = image_paths[0]
            item['image_path'] = os.path.join(
                os.path.abspath(self.store_path), image_path) if image_path else ''
return item
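# Minimal sketch of the project settings this pipeline assumes (the setting
# names are standard Scrapy ones; the dotted path is hypothetical and depends
# on the project layout):
#   ITEM_PIPELINES = {'jobspider.pipelines.down_image.MyImagePipeline': 1}
#   IMAGES_STORE = '/path/to/image/store'   # passed to __init__ as store_uri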
| mit |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/boto/cloudformation/connection.py | 127 | 42922 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.cloudformation.stack import Stack, StackSummary, StackEvent
from boto.cloudformation.stack import StackResource, StackResourceSummary
from boto.cloudformation.template import Template
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
class CloudFormationConnection(AWSQueryConnection):
"""
AWS CloudFormation
AWS CloudFormation enables you to create and manage AWS
infrastructure deployments predictably and repeatedly. AWS
CloudFormation helps you leverage AWS products such as Amazon EC2,
EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable,
highly scalable, cost effective applications without worrying
about creating and configuring the underlying AWS infrastructure.
With AWS CloudFormation, you declare all of your resources and
dependencies in a template file. The template defines a collection
of resources as a single unit called a stack. AWS CloudFormation
creates and deletes all member resources of the stack together and
manages all dependencies between the resources for you.
For more information about this product, go to the `CloudFormation
Product Page`_.
Amazon CloudFormation makes use of other AWS products. If you need
additional technical information about a specific AWS product, you
can find the product's technical documentation at
`http://aws.amazon.com/documentation/`_.
"""
APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15')
DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'cfn_region_endpoint',
'cloudformation.us-east-1.amazonaws.com')
valid_states = (
'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE',
'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE',
'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE',
'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS',
'UPDATE_ROLLBACK_FAILED',
'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_ROLLBACK_COMPLETE')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
converter=None, security_token=None, validate_certs=True,
profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint, CloudFormationConnection)
self.region = region
super(CloudFormationConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
def _build_create_or_update_params(self, stack_name, template_body,
template_url, parameters, disable_rollback, timeout_in_minutes,
notification_arns, capabilities, on_failure, stack_policy_body,
stack_policy_url, tags, use_previous_template=None,
stack_policy_during_update_body=None,
stack_policy_during_update_url=None):
"""
Helper that creates JSON parameters needed by a Stack Create or
Stack Update call.
:type stack_name: string
:param stack_name:
The name associated with the stack. The name must be unique within your
AWS account.
Must contain only alphanumeric characters (case sensitive) and start
with an alpha character. Maximum length of the name is 255
characters.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
`TemplateUrl` are passed, only `TemplateBody` is used.
`TemplateBody`.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template (max size: 307,200 bytes) located in
an S3 bucket in the same region as the stack. For more information,
go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
`TemplateUrl` are passed, only `TemplateBody` is used.
`TemplateBody`.
:type parameters: list
:param parameters: A list of key/value tuples that specify input
parameters for the stack. A 3-tuple (key, value, bool) may be used to
specify the `UsePreviousValue` option.
:type disable_rollback: boolean
:param disable_rollback: Set to `True` to disable rollback of the stack
if stack creation failed. You can specify either `DisableRollback`
or `OnFailure`, but not both.
Default: `False`
:type timeout_in_minutes: integer
:param timeout_in_minutes: The amount of time that can pass before the
stack status becomes CREATE_FAILED; if `DisableRollback` is not set
or is set to `False`, the stack will be rolled back.
:type notification_arns: list
:param notification_arns: The Simple Notification Service (SNS) topic
ARNs to publish stack related events. You can find your SNS topic
ARNs using the `SNS console`_ or your Command Line Interface (CLI).
:type capabilities: list
:param capabilities: The list of capabilities that you want to allow in
the stack. If your template contains certain resources, you must
specify the CAPABILITY_IAM value for this parameter; otherwise,
this action returns an InsufficientCapabilities error. The
following resources require you to specify the capabilities
parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
`AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
`AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
`AWS::IAM::UserToGroupAddition`_.
:type on_failure: string
:param on_failure: Determines what action will be taken if stack
creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
DELETE. You can specify either `OnFailure` or `DisableRollback`,
but not both.
Default: `ROLLBACK`
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
If you pass `StackPolicyBody` and `StackPolicyURL`, only
`StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. If you pass
`StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
used.
:type tags: list
:param tags: A set of user-defined `Tags` to associate with this stack,
represented by key/value pairs. Tags defined for the stack are
propagated to EC2 resources that are created as part of the stack.
A maximum number of 10 tags can be specified.
:type use_previous_template: boolean
:param use_previous_template: Set to `True` to use the previous
template instead of uploading a new one via `TemplateBody` or
`TemplateURL`.
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`.
:type stack_policy_during_update_body: string
:param stack_policy_during_update_body: Structure containing the
temporary overriding stack policy body. If you pass
`StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
only `StackPolicyDuringUpdateBody` is used.
If you want to update protected resources, specify a temporary
overriding stack policy during this update. If you do not specify a
stack policy, the current policy that associated with the stack
will be used.
:type stack_policy_during_update_url: string
:param stack_policy_during_update_url: Location of a file containing
the temporary overriding stack policy. The URL must point to a
policy (max size: 16KB) located in an S3 bucket in the same region
as the stack. If you pass `StackPolicyDuringUpdateBody` and
`StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
used.
If you want to update protected resources, specify a temporary
overriding stack policy during this update. If you do not specify a
stack policy, the current policy that is associated with the stack
will be used.
:rtype: dict
:return: JSON parameters represented as a Python dict.
"""
params = {'ContentType': "JSON", 'StackName': stack_name,
'DisableRollback': self.encode_bool(disable_rollback)}
if template_body:
params['TemplateBody'] = template_body
if template_url:
params['TemplateURL'] = template_url
if use_previous_template is not None:
params['UsePreviousTemplate'] = self.encode_bool(use_previous_template)
if template_body and template_url:
boto.log.warning("If both TemplateBody and TemplateURL are"
" specified, only TemplateBody will be honored by the API")
if parameters and len(parameters) > 0:
for i, parameter_tuple in enumerate(parameters):
key, value = parameter_tuple[:2]
use_previous = (parameter_tuple[2]
if len(parameter_tuple) > 2 else False)
params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
if use_previous:
params['Parameters.member.%d.UsePreviousValue'
% (i + 1)] = self.encode_bool(use_previous)
else:
params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
if capabilities:
for i, value in enumerate(capabilities):
params['Capabilities.member.%d' % (i + 1)] = value
if tags:
for i, (key, value) in enumerate(tags.items()):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = value
if notification_arns and len(notification_arns) > 0:
self.build_list_params(params, notification_arns,
"NotificationARNs.member")
if timeout_in_minutes:
params['TimeoutInMinutes'] = int(timeout_in_minutes)
if disable_rollback is not None:
params['DisableRollback'] = str(
disable_rollback).lower()
if on_failure is not None:
params['OnFailure'] = on_failure
if stack_policy_body is not None:
params['StackPolicyBody'] = stack_policy_body
if stack_policy_url is not None:
params['StackPolicyURL'] = stack_policy_url
if stack_policy_during_update_body is not None:
params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body
if stack_policy_during_update_url is not None:
params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url
return params
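    # Sketch of the flattened wire format this helper produces (hypothetical
    # values): _build_create_or_update_params('s', body, None, [('K', 'V')], ...)
    # yields keys along the lines of
    #   {'ContentType': 'JSON', 'StackName': 's', 'TemplateBody': body,
    #    'Parameters.member.1.ParameterKey': 'K',
    #    'Parameters.member.1.ParameterValue': 'V', ...}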
def _do_request(self, call, params, path, method):
"""
Do a request via ``self.make_request`` and parse the JSON response.
:type call: string
:param call: Call name, e.g. ``CreateStack``
:type params: dict
:param params: Dictionary of call parameters
:type path: string
:param path: Server path
:type method: string
:param method: HTTP method to use
:rtype: dict
:return: Parsed JSON response data
"""
response = self.make_request(call, params, path, method)
body = response.read().decode('utf-8')
if response.status == 200:
body = json.loads(body)
return body
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body=body)
def create_stack(self, stack_name, template_body=None, template_url=None,
parameters=None, notification_arns=None, disable_rollback=None,
timeout_in_minutes=None, capabilities=None, tags=None,
on_failure=None, stack_policy_body=None, stack_policy_url=None):
"""
Creates a stack as specified in the template. After the call
completes successfully, the stack creation starts. You can
check the status of the stack via the DescribeStacks API.
Currently, the limit for stacks is 20 stacks per account per
region.
:type stack_name: string
:param stack_name:
The name associated with the stack. The name must be unique within your
AWS account.
Must contain only alphanumeric characters (case sensitive) and start
with an alpha character. Maximum length of the name is 255
characters.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template (max size: 307,200 bytes) located in
an S3 bucket in the same region as the stack. For more information,
go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
:type parameters: list
:param parameters: A list of key/value tuples that specify input
parameters for the stack.
:type disable_rollback: boolean
:param disable_rollback: Set to `True` to disable rollback of the stack
if stack creation failed. You can specify either `DisableRollback`
or `OnFailure`, but not both.
Default: `False`
:type timeout_in_minutes: integer
:param timeout_in_minutes: The amount of time that can pass before the
stack status becomes CREATE_FAILED; if `DisableRollback` is not set
or is set to `False`, the stack will be rolled back.
:type notification_arns: list
:param notification_arns: The Simple Notification Service (SNS) topic
ARNs to publish stack related events. You can find your SNS topic
ARNs using the `SNS console`_ or your Command Line Interface (CLI).
:type capabilities: list
:param capabilities: The list of capabilities that you want to allow in
the stack. If your template contains certain resources, you must
specify the CAPABILITY_IAM value for this parameter; otherwise,
this action returns an InsufficientCapabilities error. The
following resources require you to specify the capabilities
parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
`AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
`AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
`AWS::IAM::UserToGroupAddition`_.
:type on_failure: string
:param on_failure: Determines what action will be taken if stack
creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
DELETE. You can specify either `OnFailure` or `DisableRollback`,
but not both.
Default: `ROLLBACK`
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
If you pass `StackPolicyBody` and `StackPolicyURL`, only
`StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. If you pass
`StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
used.
:type tags: dict
:param tags: A set of user-defined `Tags` to associate with this stack,
represented by key/value pairs. Tags defined for the stack are
propagated to EC2 resources that are created as part of the stack.
A maximum number of 10 tags can be specified.
"""
params = self._build_create_or_update_params(stack_name, template_body,
template_url, parameters, disable_rollback, timeout_in_minutes,
notification_arns, capabilities, on_failure, stack_policy_body,
stack_policy_url, tags)
body = self._do_request('CreateStack', params, '/', 'POST')
return body['CreateStackResponse']['CreateStackResult']['StackId']
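    # Hypothetical usage sketch (the URL and parameter values are assumptions;
    # boto resolves credentials from its usual config/environment sources):
    #   conn = CloudFormationConnection()
    #   stack_id = conn.create_stack(
    #       'my-stack',
    #       template_url='https://s3.amazonaws.com/bucket/template.json',
    #       parameters=[('InstanceType', 't2.micro')],
    #       capabilities=['CAPABILITY_IAM'])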
def update_stack(self, stack_name, template_body=None, template_url=None,
parameters=None, notification_arns=None, disable_rollback=False,
timeout_in_minutes=None, capabilities=None, tags=None,
use_previous_template=None,
stack_policy_during_update_body=None,
stack_policy_during_update_url=None,
stack_policy_body=None, stack_policy_url=None):
"""
Updates a stack as specified in the template. After the call
completes successfully, the stack update starts. You can check
the status of the stack via the DescribeStacks action.
        **Note:** You cannot update `AWS::S3::Bucket`_ resources, for
        example, to add or modify tags.
To get a copy of the template for an existing stack, you can
use the GetTemplate action.
Tags that were associated with this stack during creation time
will still be associated with the stack after an `UpdateStack`
operation.
For more information about creating an update template,
updating a stack, and monitoring the progress of the update,
see `Updating a Stack`_.
:type stack_name: string
:param stack_name:
The name or stack ID of the stack to update.
Must contain only alphanumeric characters (case sensitive) and start
with an alpha character. Maximum length of the name is 255
characters.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
`TemplateUrl` are passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template (max size: 307,200 bytes) located in
an S3 bucket in the same region as the stack. For more information,
go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
`TemplateUrl` are passed, only `TemplateBody` is used.
:type use_previous_template: boolean
:param use_previous_template: Set to `True` to use the previous
template instead of uploading a new one via `TemplateBody` or
`TemplateURL`.
Conditional: You must pass either `UsePreviousTemplate` or one of
`TemplateBody` or `TemplateUrl`.
:type parameters: list
:param parameters: A list of key/value tuples that specify input
parameters for the stack. A 3-tuple (key, value, bool) may be used to
specify the `UsePreviousValue` option.
:type notification_arns: list
:param notification_arns: The Simple Notification Service (SNS) topic
ARNs to publish stack related events. You can find your SNS topic
ARNs using the `SNS console`_ or your Command Line Interface (CLI).
:type disable_rollback: bool
:param disable_rollback: Set to `True` to disable rollback of the
stack if the operation fails.
:type timeout_in_minutes: integer
:param timeout_in_minutes: The amount of time that can pass before the
stack status becomes CREATE_FAILED; if `DisableRollback` is not set
or is set to `False`, the stack will be rolled back.
:type capabilities: list
:param capabilities: The list of capabilities you want to allow in
the stack. Currently, the only valid capability is
'CAPABILITY_IAM'.
:type tags: dict
:param tags: A set of user-defined `Tags` to associate with this stack,
represented by key/value pairs. Tags defined for the stack are
propagated to EC2 resources that are created as part of the stack.
A maximum of 10 tags can be specified.
:type stack_policy_during_update_body: string
:param stack_policy_during_update_body: Structure containing the
temporary overriding stack policy body. If you pass
`StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
only `StackPolicyDuringUpdateBody` is used.
If you want to update protected resources, specify a temporary
overriding stack policy during this update. If you do not specify a
stack policy, the current policy that is associated with the stack
will be used.
:type stack_policy_during_update_url: string
:param stack_policy_during_update_url: Location of a file containing
the temporary overriding stack policy. The URL must point to a
policy (max size: 16KB) located in an S3 bucket in the same region
as the stack. If you pass `StackPolicyDuringUpdateBody` and
`StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
used.
If you want to update protected resources, specify a temporary
overriding stack policy during this update. If you do not specify a
stack policy, the current policy that is associated with the stack
will be used.
:rtype: string
:return: The unique Stack ID.
"""
params = self._build_create_or_update_params(stack_name, template_body,
template_url, parameters, disable_rollback, timeout_in_minutes,
notification_arns, capabilities, None, stack_policy_body,
stack_policy_url, tags, use_previous_template,
stack_policy_during_update_body, stack_policy_during_update_url)
body = self._do_request('UpdateStack', params, '/', 'POST')
return body['UpdateStackResponse']['UpdateStackResult']['StackId']
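# Illustrative sketch (assumed values): reuse the stored template and
# keep one parameter's previous value via the (key, value, bool) form
# documented above.
#
# conn.update_stack('my-stack',
# use_previous_template=True,
# parameters=[('KeyName', '', True), ('InstanceType', 't2.small')])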
def delete_stack(self, stack_name_or_id):
"""
Deletes a specified stack. Once the call completes
successfully, stack deletion starts. Deleted stacks do not
show up in the DescribeStacks API if the deletion has been
completed successfully.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
return self._do_request('DeleteStack', params, '/', 'GET')
def describe_stack_events(self, stack_name_or_id=None, next_token=None):
"""
Returns all stack related events for a specified stack. For
more information about a stack's event history, go to
`Stacks`_ in the AWS CloudFormation User Guide.
Events are returned, even if the stack never existed or has
been successfully deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Default: There is no default value.
:type next_token: string
:param next_token: String that identifies the start of the next list of
events, if there is one.
Default: There is no default value.
"""
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeStackEvents', params, [('member',
StackEvent)])
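# Minimal pagination sketch; treating the returned list's `next_token`
# attribute as the continuation token is an assumption about boto's
# ResultSet behavior, not guaranteed by this method.
#
# page = conn.describe_stack_events('my-stack')
# while getattr(page, 'next_token', None):
# page = conn.describe_stack_events('my-stack', page.next_token)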
def describe_stack_resource(self, stack_name_or_id, logical_resource_id):
"""
Returns a description of the specified resource in the
specified stack.
For deleted stacks, DescribeStackResource returns resource
information for up to 90 days after the stack has been
deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Default: There is no default value.
:type logical_resource_id: string
:param logical_resource_id: The logical name of the resource as
specified in the template.
Default: There is no default value.
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id,
'LogicalResourceId': logical_resource_id}
return self._do_request('DescribeStackResource', params, '/', 'GET')
def describe_stack_resources(self, stack_name_or_id=None,
logical_resource_id=None,
physical_resource_id=None):
"""
Returns AWS resource descriptions for running and deleted
stacks. If `StackName` is specified, all the associated
resources that are part of the stack are returned. If
`PhysicalResourceId` is specified, the associated resources of
the stack that the resource belongs to are returned.
Only the first 100 resources will be returned. If your stack
has more resources than this, you should use
`ListStackResources` instead.
For deleted stacks, `DescribeStackResources` returns resource
information for up to 90 days after the stack has been
deleted.
You must specify either `StackName` or `PhysicalResourceId`,
but not both. In addition, you can specify `LogicalResourceId`
to filter the returned result. For more information about
resources, the `LogicalResourceId` and `PhysicalResourceId`,
go to the `AWS CloudFormation User Guide`_.
A `ValidationError` is returned if you specify both
`StackName` and `PhysicalResourceId` in the same request.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Required: Conditional. If you do not specify `StackName`, you must
specify `PhysicalResourceId`.
Default: There is no default value.
:type logical_resource_id: string
:param logical_resource_id: The logical name of the resource as
specified in the template.
Default: There is no default value.
:type physical_resource_id: string
:param physical_resource_id: The name or unique identifier that
corresponds to a physical instance ID of a resource supported by
AWS CloudFormation.
For example, for an Amazon Elastic Compute Cloud (EC2) instance,
`PhysicalResourceId` corresponds to the `InstanceId`. You can pass
the EC2 `InstanceId` to `DescribeStackResources` to find which
stack the instance belongs to and what other resources are part of
the stack.
Required: Conditional. If you do not specify `PhysicalResourceId`, you
must specify `StackName`.
Default: There is no default value.
"""
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if logical_resource_id:
params['LogicalResourceId'] = logical_resource_id
if physical_resource_id:
params['PhysicalResourceId'] = physical_resource_id
return self.get_list('DescribeStackResources', params,
[('member', StackResource)])
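# Sketch of the instance-to-stack lookup described above; the instance
# ID and the `stack_name` attribute on the result are assumptions.
#
# resources = conn.describe_stack_resources(
# physical_resource_id='i-1234567890abcdef0')
# if resources:
# print(resources[0].stack_name)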
def describe_stacks(self, stack_name_or_id=None, next_token=None):
"""
Returns the description for the specified stack; if no stack
name is specified, returns the descriptions of all created
stacks.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Default: There is no default value.
:type next_token: string
:param next_token: String that identifies the start of the next list of
stacks, if there is one.
"""
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if next_token is not None:
params['NextToken'] = next_token
return self.get_list('DescribeStacks', params, [('member', Stack)])
def get_template(self, stack_name_or_id):
"""
Returns the template body for a specified stack. You can get
the template for running or deleted stacks.
For deleted stacks, GetTemplate returns the template for up to
90 days after the stack has been deleted.
If the template does not exist, a `ValidationError` is
returned.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack, which are not always interchangeable:
+ Running stacks: You can specify either the stack's name or its unique
stack ID.
+ Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
return self._do_request('GetTemplate', params, '/', 'GET')
def list_stack_resources(self, stack_name_or_id, next_token=None):
"""
Returns descriptions of all resources of the specified stack.
For deleted stacks, ListStackResources returns resource
information for up to 90 days after the stack has been
deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack, which are not always interchangeable:
+ Running stacks: You can specify either the stack's name or its unique
stack ID.
+ Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type next_token: string
:param next_token: String that identifies the start of the next list of
stack resource summaries, if there is one.
Default: There is no default value.
"""
params = {'StackName': stack_name_or_id}
if next_token:
params['NextToken'] = next_token
return self.get_list('ListStackResources', params,
[('member', StackResourceSummary)])
def list_stacks(self, stack_status_filters=None, next_token=None):
"""
Returns the summary information for stacks whose status
matches the specified StackStatusFilter. Summary information
for stacks that have been deleted is kept for 90 days after
the stack is deleted. If no StackStatusFilter is specified,
summary information for all stacks is returned (including
existing stacks and stacks that have been deleted).
:type next_token: string
:param next_token: String that identifies the start of the next list of
stacks, if there is one.
Default: There is no default value.
:type stack_status_filters: list
:param stack_status_filters: Stack status to use as a filter. Specify
one or more stack status codes to list only stacks with the
specified status codes. For a complete list of stack status codes,
see the `StackStatus` parameter of the Stack data type.
"""
params = {}
if next_token:
params['NextToken'] = next_token
if stack_status_filters and len(stack_status_filters) > 0:
self.build_list_params(params, stack_status_filters,
"StackStatusFilter.member")
return self.get_list('ListStacks', params,
[('member', StackSummary)])
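# Hedged example: list only successfully created or updated stacks.
# The status codes are standard CloudFormation values; the call shape
# follows the signature above.
#
# summaries = conn.list_stacks(
# stack_status_filters=['CREATE_COMPLETE', 'UPDATE_COMPLETE'])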
def validate_template(self, template_body=None, template_url=None):
"""
Validates a specified template.
:type template_body: string
:param template_body: String containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template (max size: 307,200 bytes) located in
an S3 bucket in the same region as the stack. For more information,
go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
"""
params = {}
if template_body:
params['TemplateBody'] = template_body
if template_url:
params['TemplateURL'] = template_url
if template_body and template_url:
boto.log.warning("If both TemplateBody and TemplateURL are"
" specified, only TemplateBody will be honored by the API")
return self.get_object('ValidateTemplate', params, Template,
verb="POST")
def cancel_update_stack(self, stack_name_or_id=None):
"""
Cancels an update on the specified stack. If the call
completes successfully, the stack will roll back the update
and revert to the previous stack configuration.
Only stacks that are in the UPDATE_IN_PROGRESS state can be
canceled.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated with
the stack.
"""
params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
return self.get_status('CancelUpdateStack', params)
def estimate_template_cost(self, template_body=None, template_url=None,
parameters=None):
"""
Returns the estimated monthly cost of a template. The return
value is an AWS Simple Monthly Calculator URL with a query
string that describes the resources required to run the
template.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
passed, only `TemplateBody` is used.
:type template_url: string
:param template_url: Location of file containing the template body. The
URL must point to a template located in an S3 bucket in the same
region as the stack. For more information, go to `Template
Anatomy`_ in the AWS CloudFormation User Guide.
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
:type parameters: list
:param parameters: A list of key/value tuples that specify input
parameters for the template.
:rtype: string
:returns: URL to pre-filled cost calculator
"""
params = {'ContentType': "JSON"}
if template_body is not None:
params['TemplateBody'] = template_body
if template_url is not None:
params['TemplateURL'] = template_url
if parameters and len(parameters) > 0:
for i, (key, value) in enumerate(parameters):
params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
response = self._do_request('EstimateTemplateCost', params, '/', 'POST')
return response['EstimateTemplateCostResponse']\
['EstimateTemplateCostResult']\
['Url']
def get_stack_policy(self, stack_name_or_id):
"""
Returns the stack policy for a specified stack. If a stack
doesn't have a policy, a null value is returned.
:type stack_name_or_id: string
:param stack_name_or_id: The name or stack ID that is associated with
the stack whose policy you want to get.
:rtype: string
:return: The policy JSON document
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
response = self._do_request('GetStackPolicy', params, '/', 'POST')
return response['GetStackPolicyResponse']\
['GetStackPolicyResult']\
['StackPolicyBody']
def set_stack_policy(self, stack_name_or_id, stack_policy_body=None,
stack_policy_url=None):
"""
Sets a stack policy for a specified stack.
:type stack_name_or_id: string
:param stack_name_or_id: The name or stack ID that you want to
associate a policy with.
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
passed, only `StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. You must pass
`StackPolicyBody` or `StackPolicyURL`. If both are passed, only
`StackPolicyBody` is used.
"""
params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
if stack_policy_body is not None:
params['StackPolicyBody'] = stack_policy_body
if stack_policy_url is not None:
params['StackPolicyURL'] = stack_policy_url
response = self._do_request('SetStackPolicy', params, '/', 'POST')
return response['SetStackPolicyResponse']
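# Illustrative stack policy document (assumed content) for the two
# calls above: deny all updates, then read the policy back.
#
# policy = ('{"Statement": [{"Effect": "Deny", "Action": "Update:*",'
# ' "Principal": "*", "Resource": "*"}]}')
# conn.set_stack_policy('my-stack', stack_policy_body=policy)
# print(conn.get_stack_policy('my-stack'))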
| bsd-3-clause |
adnanh/zulip | zerver/lib/bugdown/codehilite.py | 116 | 8441 | """
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/code_hilite.html>
Contact: markdown@freewisdom.org
License: BSD (see ../LICENSE.md for details)
Dependencies:
* [Python 2.3+](http://python.org/)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments](http://pygments.org/)
"""
import markdown
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer
from pygments.formatters import HtmlFormatter
pygments = True
except ImportError:
pygments = False
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite:
"""
Determine language of source code, and pass it into the pygments highlighter.
Basic Usage:
>>> code = CodeHilite(src = 'some text')
>>> html = code.hilite()
* src: Source string or any object with a .readline attribute.
* force_linenos: (Boolean) Force line numbering 'on' (True) or 'off' (False).
If not specified, number lines iff a shebang line is present.
* guess_lang: (Boolean) Turn language auto-detection 'on' or 'off' (on by default).
* css_class: Set class name of wrapper div ('codehilite' by default).
Low Level Usage:
>>> code = CodeHilite()
>>> code.src = 'some text' # String or anything with a .readline attr.
>>> code.linenos = True # True or False; turns line numbering on or off.
>>> html = code.hilite()
"""
def __init__(self, src=None, force_linenos=None, guess_lang=True,
css_class="codehilite", lang=None, style='default',
noclasses=False, tab_length=4):
self.src = src
self.lang = lang
self.linenos = force_linenos
self.guess_lang = guess_lang
self.css_class = css_class
self.style = style
self.noclasses = noclasses
self.tab_length = tab_length
def hilite(self):
"""
Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
optional line numbers. The output should then be styled with css to
your liking. No styles are applied by default - only styling hooks
(i.e.: <span class="k">).
returns : A string of html.
"""
self.src = self.src.strip('\n')
if self.lang is None:
self._getLang()
if pygments:
try:
lexer = get_lexer_by_name(self.lang)
except ValueError:
try:
if self.guess_lang:
lexer = guess_lexer(self.src)
else:
lexer = TextLexer()
except ValueError:
lexer = TextLexer()
formatter = HtmlFormatter(linenos=bool(self.linenos),
cssclass=self.css_class,
style=self.style,
noclasses=self.noclasses)
return highlight(self.src, lexer, formatter)
else:
# just escape and build markup usable by JS highlighting libs
txt = self.src.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
classes = []
if self.lang:
classes.append('language-%s' % self.lang)
if self.linenos:
classes.append('linenums')
class_str = ''
if classes:
class_str = ' class="%s"' % ' '.join(classes)
return '<pre class="%s"><code%s>%s</code></pre>\n'% \
(self.css_class, class_str, txt)
def _getLang(self):
"""
Determines language of a code block from shebang line and whether said
line should be removed or left in place. If the shebang line contains a
path (even a single /) then it is assumed to be a real shebang line and
left alone. However, if no path is given (i.e.: #!python or :::python)
then it is assumed to be a mock shebang for language identification of a
code fragment and removed from the code block prior to processing for
code highlighting. When a mock shebang (i.e.: #!python) is found, line
numbering is turned on. When colons are found in place of a shebang
(i.e.: :::python), line numbering is left in the current state - off
by default.
"""
import re
#split text into lines
lines = self.src.split("\n")
#pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons.
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w+-]*) # The language
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError:
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if m.group('shebang') and self.linenos is None:
# shebang exists - use line numbers
self.linenos = True
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(markdown.treeprocessors.Treeprocessor):
""" Hilight source code in code blocks. """
def run(self, root):
""" Find code blocks and store in htmlStash. """
blocks = root.getiterator('pre')
for block in blocks:
children = block.getchildren()
if len(children) == 1 and children[0].tag == 'code':
code = CodeHilite(children[0].text,
force_linenos=self.config['force_linenos'],
guess_lang=self.config['guess_lang'],
css_class=self.config['css_class'],
style=self.config['pygments_style'],
noclasses=self.config['noclasses'],
tab_length=self.markdown.tab_length)
placeholder = self.markdown.htmlStash.store(code.hilite(),
safe=True)
# Clear codeblock in etree instance
block.clear()
# Change to p element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
class CodeHiliteExtension(markdown.Extension):
""" Add source code hilighting to markdown codeblocks. """
def __init__(self, configs):
# define default configs
self.config = {
'force_linenos' : [None, "Force line numbers - Default: detect based on shebang"],
'guess_lang' : [True, "Automatic language detection - Default: True"],
'css_class' : ["codehilite",
"Set class name for wrapper <div> - Default: codehilite"],
'pygments_style' : ['default', 'Pygments HTML Formatter Style (Colorscheme) - Default: default'],
'noclasses': [False, 'Use inline styles instead of CSS classes - Default false']
}
# Override defaults with user settings
for key, value in configs:
# convert strings to booleans
if value == 'True': value = True
if value == 'False': value = False
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
""" Add HilitePostprocessor to Markdown instance. """
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.getConfigs()
md.treeprocessors.add("hilite", hiliter, "<inline")
md.registerExtension(self)
def makeExtension(configs={}):
return CodeHiliteExtension(configs=configs)
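# Hedged usage sketch: wiring the extension into python-markdown; the
# config value shown mirrors the defaults defined above.
#
# import markdown
# html = markdown.markdown("\t#!python\n\tprint('hi')",
# extensions=[makeExtension(configs=[('noclasses', 'True')])])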
| apache-2.0 |
campbe13/openhatch | vendor/packages/Django/tests/regressiontests/bug639/tests.py | 102 | 1434 | """
Tests for file field behavior, and specifically #639, in which Model.save()
gets called *again* for each FileField. This test will fail if calling a
ModelForm's save() method causes Model.save() to be called more than once.
"""
from __future__ import absolute_import
import os
import shutil
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import unittest
from django.utils._os import upath
from .models import Photo, PhotoForm, temp_storage_dir
class Bug639Test(unittest.TestCase):
def testBug639(self):
"""
Simulate a file upload and check how many times Model.save() gets
called.
"""
# Grab an image for testing.
filename = os.path.join(os.path.dirname(upath(__file__)), "test.jpg")
with open(filename, "rb") as fp:
img = fp.read()
# Fake a POST QueryDict and FILES MultiValueDict.
data = {'title': 'Testing'}
files = {"image": SimpleUploadedFile('test.jpg', img, 'image/jpeg')}
form = PhotoForm(data=data, files=files)
p = form.save()
# Check the savecount stored on the object (see the model).
self.assertEqual(p._savecount, 1)
def tearDown(self):
"""
Make sure to delete the "uploaded" file to avoid clogging /tmp.
"""
p = Photo.objects.get()
p.image.delete(save=False)
shutil.rmtree(temp_storage_dir)
| agpl-3.0 |
mbauskar/alec_frappe5_erpnext | erpnext/hr/report/employee_leave_balance/employee_leave_balance.py | 31 | 2575 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.reportview import execute as runreport
def execute(filters=None):
if not filters: filters = {}
employee_filters = {
"status": "Active"
}
if filters.get("company"):
filters["company"] = filters.company
employees = runreport(doctype="Employee", fields=["name", "employee_name", "department"],
filters=employee_filters, limit_page_length=None)
if not employees:
frappe.throw(_("No employee found!"))
leave_types = frappe.db.sql_list("select name from `tabLeave Type`")
if filters.get("fiscal_year"):
fiscal_years = [filters["fiscal_year"]]
else:
fiscal_years = frappe.db.sql_list("select name from `tabFiscal Year` order by name desc")
employee_names = [d.name for d in employees]
allocations = frappe.db.sql("""select employee, fiscal_year, leave_type, total_leaves_allocated
from `tabLeave Allocation`
where docstatus=1 and employee in (%s)""" %
','.join(['%s']*len(employee_names)), employee_names, as_dict=True)
applications = frappe.db.sql("""select employee, fiscal_year, leave_type,
SUM(total_leave_days) as leaves
from `tabLeave Application`
where status="Approved" and docstatus = 1 and employee in (%s)
group by employee, fiscal_year, leave_type""" %
','.join(['%s']*len(employee_names)), employee_names, as_dict=True)
columns = [
_("Fiscal Year"), _("Employee") + ":Link/Employee:150", _("Employee Name") + "::200", _("Department") +"::150"
]
for leave_type in leave_types:
columns.append(_(leave_type) + " " + _("Allocated") + ":Float")
columns.append(_(leave_type) + " " + _("Taken") + ":Float")
columns.append(_(leave_type) + " " + _("Balance") + ":Float")
data = {}
for d in allocations:
data.setdefault((d.fiscal_year, d.employee,
d.leave_type), frappe._dict()).allocation = d.total_leaves_allocated
for d in applications:
data.setdefault((d.fiscal_year, d.employee,
d.leave_type), frappe._dict()).leaves = d.leaves
result = []
for fiscal_year in fiscal_years:
for employee in employees:
row = [fiscal_year, employee.name, employee.employee_name, employee.department]
result.append(row)
for leave_type in leave_types:
tmp = data.get((fiscal_year, employee.name, leave_type), frappe._dict())
row.append(tmp.allocation or 0)
row.append(tmp.leaves or 0)
row.append((tmp.allocation or 0) - (tmp.leaves or 0))
return columns, result
| agpl-3.0 |
bcoca/ansible | lib/ansible/playbook/role_include.py | 18 | 7414 | #
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os.path import basename
import ansible.constants as C
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.block import Block
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
from ansible.utils.display import Display
from ansible.module_utils.six import string_types
__all__ = ['IncludeRole']
display = Display()
class IncludeRole(TaskInclude):
"""
A Role include is derived from a regular role to handle the special
circumstances related to the `- include_role: ...`
"""
BASE = ('name', 'role') # directly assigned
FROM_ARGS = ('tasks_from', 'vars_from', 'defaults_from', 'handlers_from') # used to populate from dict in role
OTHER_ARGS = ('apply', 'public', 'allow_duplicates', 'rolespec_validate') # assigned to matching property
VALID_ARGS = tuple(frozenset(BASE + FROM_ARGS + OTHER_ARGS)) # all valid args
# =================================================================================
# ATTRIBUTES
# private as this is a 'module options' vs a task property
_allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
_public = FieldAttribute(isa='bool', default=False, private=True)
_rolespec_validate = FieldAttribute(isa='bool', default=True)
def __init__(self, block=None, role=None, task_include=None):
super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)
self._from_files = {}
self._parent_role = role
self._role_name = None
self._role_path = None
def get_name(self):
''' return the name of the task '''
return self.name or "%s : %s" % (self.action, self._role_name)
def get_block_list(self, play=None, variable_manager=None, loader=None):
# only need play passed in when dynamic
if play is None:
myplay = self._parent._play
else:
myplay = play
ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader, collection_list=self.collections)
ri.vars.update(self.vars)
# build role
actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files,
from_include=True, validate=self.rolespec_validate)
actual_role._metadata.allow_duplicates = self.allow_duplicates
if self.statically_loaded or self.public:
myplay.roles.append(actual_role)
# save this for later use
self._role_path = actual_role._role_path
# compile role with parent roles as dependencies to ensure they inherit
# variables
if not self._parent_role:
dep_chain = []
else:
dep_chain = list(self._parent_role._parents)
dep_chain.append(self._parent_role)
p_block = self.build_parent_block()
# collections value is not inherited; override with the value we calculated during role setup
p_block.collections = actual_role.collections
blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
for b in blocks:
b._parent = p_block
# HACK: parent inheritance doesn't seem to have a way to handle this intermediate override until squashed/finalized
b.collections = actual_role.collections
# updated available handlers in play
handlers = actual_role.get_handler_blocks(play=myplay)
for h in handlers:
h._parent = p_block
myplay.handlers = myplay.handlers + handlers
return blocks, handlers
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)
# Validate options
my_arg_names = frozenset(ir.args.keys())
# name is needed, or use role as alias
ir._role_name = ir.args.get('name', ir.args.get('role'))
if ir._role_name is None:
raise AnsibleParserError("'name' is a required field for %s." % ir.action, obj=data)
if 'public' in ir.args and ir.action not in C._ACTION_INCLUDE_ROLE:
raise AnsibleParserError('Invalid options for %s: public' % ir.action, obj=data)
# validate bad args, otherwise we silently ignore
bad_opts = my_arg_names.difference(IncludeRole.VALID_ARGS)
if bad_opts:
raise AnsibleParserError('Invalid options for %s: %s' % (ir.action, ','.join(list(bad_opts))), obj=data)
# build options for role includes
for key in my_arg_names.intersection(IncludeRole.FROM_ARGS):
from_key = key.replace('_from', '')
args_value = ir.args.get(key)
if not isinstance(args_value, string_types):
raise AnsibleParserError('Expected a string for %s but got %s instead' % (key, type(args_value)))
ir._from_files[from_key] = basename(args_value)
apply_attrs = ir.args.get('apply', {})
if apply_attrs and ir.action not in C._ACTION_INCLUDE_ROLE:
raise AnsibleParserError('Invalid options for %s: apply' % ir.action, obj=data)
elif not isinstance(apply_attrs, dict):
raise AnsibleParserError('Expected a dict for apply but got %s instead' % type(apply_attrs), obj=data)
# manual list as otherwise the options would set other task parameters we don't want.
for option in my_arg_names.intersection(IncludeRole.OTHER_ARGS):
setattr(ir, option, ir.args.get(option))
return ir
def copy(self, exclude_parent=False, exclude_tasks=False):
new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
new_me.statically_loaded = self.statically_loaded
new_me._from_files = self._from_files.copy()
new_me._parent_role = self._parent_role
new_me._role_name = self._role_name
new_me._role_path = self._role_path
return new_me
def get_include_params(self):
v = super(IncludeRole, self).get_include_params()
if self._parent_role:
v.update(self._parent_role.get_role_params())
v.setdefault('ansible_parent_role_names', []).insert(0, self._parent_role.get_name())
v.setdefault('ansible_parent_role_paths', []).insert(0, self._parent_role._role_path)
return v
| gpl-3.0 |
piquadrat/django | tests/forms_tests/field_tests/test_charfield.py | 14 | 6327 | from django.forms import (
CharField, HiddenInput, PasswordInput, Textarea, TextInput,
ValidationError,
)
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class CharFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertIsNone(f.max_length)
self.assertIsNone(f.min_length)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertIsNone(f.max_length)
self.assertIsNone(f.min_length)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
msg = "'Ensure this value has at most 10 characters (it has 11).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('1234567890a')
self.assertEqual(f.max_length, 10)
self.assertIsNone(f.min_length)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertIsNone(f.max_length)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertIsNone(f.max_length)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Setting min_length or max_length to something that is not a number
raises an exception.
"""
with self.assertRaises(ValueError):
CharField(min_length='a')
with self.assertRaises(ValueError):
CharField(max_length='a')
msg = '__init__() takes 1 positional argument but 2 were given'
with self.assertRaisesMessage(TypeError, msg):
CharField('a')
def test_charfield_widget_attrs(self):
"""
CharField.widget_attrs() always returns a dictionary and includes
minlength/maxlength if min_length/max_length are defined on the field
and the widget is not hidden.
"""
# Return an empty dictionary if max_length and min_length are both None.
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Return a maxlength attribute equal to max_length.
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
# Return a minlength attribute equal to min_length.
f = CharField(min_length=5)
self.assertEqual(f.widget_attrs(TextInput()), {'minlength': '5'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'minlength': '5'})
self.assertEqual(f.widget_attrs(Textarea()), {'minlength': '5'})
# Return both maxlength and minlength when both max_length and
# min_length are set.
f = CharField(max_length=10, min_length=5)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(HiddenInput()), {})
def test_charfield_strip(self):
"""
Values have whitespace stripped but not if strip=False.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
def test_strip_before_checking_empty(self):
"""
A whitespace-only value, ' ', is stripped to an empty string and then
converted to the empty value, None.
"""
f = CharField(required=False, empty_value=None)
self.assertIsNone(f.clean(' '))
def test_clean_non_string(self):
"""CharField.clean() calls str(value) before stripping it."""
class StringWrapper:
def __init__(self, v):
self.v = v
def __str__(self):
return self.v
value = StringWrapper(' ')
f1 = CharField(required=False, empty_value=None)
self.assertIsNone(f1.clean(value))
f2 = CharField(strip=False)
self.assertEqual(f2.clean(value), ' ')
def test_charfield_disabled(self):
f = CharField(disabled=True)
self.assertWidgetRendersTo(f, '<input type="text" name="f" id="id_f" disabled required />')
def test_null_characters_prohibited(self):
f = CharField()
msg = 'Null characters are not allowed.'
with self.assertRaisesMessage(ValidationError, msg):
f.clean('\x00something')
| bsd-3-clause |
ebagdasa/tempest | tempest/api/image/v1/test_image_members_negative.py | 11 | 2155 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class ImageMembersNegativeTest(base.BaseV1ImageMembersTest):
@test.attr(type=['negative', 'gate'])
def test_add_member_with_non_existing_image(self):
# Add member with non existing image.
non_exist_image = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.add_member,
self.alt_tenant_id, non_exist_image)
@test.attr(type=['negative', 'gate'])
def test_delete_member_with_non_existing_image(self):
# Delete member with non existing image.
non_exist_image = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound, self.client.delete_member,
self.alt_tenant_id, non_exist_image)
@test.attr(type=['negative', 'gate'])
def test_delete_member_with_non_existing_tenant(self):
# Delete member with non existing tenant.
image_id = self._create_image()
non_exist_tenant = data_utils.rand_uuid_hex()
self.assertRaises(exceptions.NotFound, self.client.delete_member,
non_exist_tenant, image_id)
@test.attr(type=['negative', 'gate'])
def test_get_image_without_membership(self):
# Image is hidden from another tenants.
image_id = self._create_image()
self.assertRaises(exceptions.NotFound,
self.alt_img_cli.get_image,
image_id)
| apache-2.0 |
e-koch/TurbuStat | Examples/paper_plots/test_fBM_delvar_vs_idl.py | 2 | 1776 |
'''
Compare TurbuStat's Delta-variance to the original IDL code.
'''
from turbustat.statistics import DeltaVariance
from turbustat.simulator import make_extended
import astropy.io.fits as fits
from astropy.table import Table
import matplotlib.pyplot as plt
import astropy.units as u
import seaborn as sb
font_scale = 1.25
width = 4.2
# Keep the default ratio used in seaborn. This can get overwritten.
height = (4.4 / 6.4) * width
figsize = (width, height)
sb.set_context("paper", font_scale=font_scale,
rc={"figure.figsize": figsize})
sb.set_palette("colorblind")
col_pal = sb.color_palette()
plt.rcParams['axes.unicode_minus'] = False
size = 256
markers = ['D', 'o']
# Make a single figure example to save space in the paper.
fig = plt.figure(figsize=figsize)
slope = 3.0
test_img = fits.PrimaryHDU(make_extended(size, powerlaw=slope))
# The power-law behaviour continues up to ~1/4 of the size
delvar = DeltaVariance(test_img).run(xlow=3 * u.pix,
xhigh=0.25 * size * u.pix,
boundary='wrap')
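# For reference (standard Delta-variance theory, stated as an assumed
# expectation rather than part of the comparison): a 2D field with power
# spectrum P(k) ~ k**-beta has Delta-variance ~ lag**(beta - 2), so
# slope=3.0 should fit a lag exponent near 1.0 over this range.
# expected_exponent = slope - 2 # -> 1.0 for this test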
plt.xscale("log")
plt.yscale("log")
plt.errorbar(delvar.lags.value, delvar.delta_var,
yerr=delvar.delta_var_error,
fmt=markers[0], label='TurbuStat')
# Now plot the IDL output
tab = Table.read("deltavar_{}.txt".format(slope), format='ascii')
# First is pixel scale, second is delvar, then delvar error, and finally
# the fit values
plt.errorbar(tab['col1'], tab['col2'], yerr=tab['col3'],
fmt=markers[1], label='IDL')
plt.grid()
plt.legend(frameon=True)
plt.ylabel(r"$\Delta$-Variance")
plt.xlabel("Scales (pix)")
plt.tight_layout()
plt.savefig("../figures/delvar_vs_idl.png")
plt.savefig("../figures/delvar_vs_idl.pdf")
plt.close()
| mit |
RubnC/modified-spectral | spectral/tests/__init__.py | 2 | 2143 | #########################################################################
#
# __init__.py - This file is part of the Spectral Python (SPy) package.
#
# Copyright (C) 2013 Thomas Boggs
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# Thomas Boggs, tboggs@users.sourceforge.net
#
'''Package containing unit test modules for various functionality.
To run all unit tests, type the following from the system command line:
# python -m spectral.tests.run
'''
from __future__ import division, print_function, unicode_literals
# If abort_on_fail is True, an AssertionError will be raised when a unit test
# fails; otherwise, the failure will be printed to stdout and testing will
# continue.
abort_on_fail = True
# Summary stats of unit test execution
_num_tests_run = 0
_num_tests_failed = 0
# Subdirectory to be created for unit test files
testdir = 'spectral_test_files'
from . import spyfile
from . import transforms
from . import memmap
from . import envi
from . import spymath
from . import detectors
from . import classifiers
from . import dimensionality
from . import spatial
from . import iterators
# List of all submodules to be run from the `run` submodule.
all_tests = [spyfile, memmap, iterators, transforms, envi, spymath, detectors,
classifiers, dimensionality, spatial]
| gpl-2.0 |
ccastell/Transfer-System | Website/env/lib/python3.5/site-packages/django/db/models/fields/__init__.py | 10 | 88153 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import itertools
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import six, timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import (
RemovedInDjango20Warning, warn_about_renamed_method,
)
from django.utils.duration import duration_string
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import Promise, cached_property, curry
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
'EmailField', 'Empty', 'Field', 'FieldDoesNotExist', 'FilePathField',
'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField',
'NOT_PROVIDED', 'NullBooleanField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'SlugField', 'SmallIntegerField', 'TextField',
'TimeField', 'URLField', 'UUIDField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
def return_None():
return None
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=(),
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.Iterator):
choices = list(choices)
self.choices = choices or []
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = list(validators) # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
"""
Return "app_label.model_label.field_name" for fields attached to
models.
"""
if not hasattr(self, 'model'):
return super(Field, self).__str__()
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
errors.extend(self._check_deprecation_details())
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
elif LOOKUP_SEP in self.name:
return [
checks.Error(
'Field names must not contain "%s".' % (LOOKUP_SEP,),
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=self,
id='fields.E003',
)
]
else:
return []
@property
def rel(self):
warnings.warn(
"Usage of field.rel has been deprecated. Use field.remote_field instead.",
RemovedInDjango20Warning, 2)
return self.remote_field
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
"'choices' must be an iterable containing "
"(actual value, human readable name) tuples.",
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
app_label = self.model._meta.app_label
for db in connections:
if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name):
return connections[db].validation.check_field(self, **kwargs)
return []
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be used
by Django.
"""
return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
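    # Example (illustrative): for a field declared as
    # models.CharField(max_length=100) and attached under the name 'title',
    # deconstruct() returns roughly:
    #   ('title', 'django.db.models.CharField', [], {'max_length': 100})
    # Defaults are omitted and the path is shortened to django.db.models.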
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
        something other than None, the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
"""
Some validators can't be created at field initialization time.
This method provides a way to delay their creation until required.
"""
return list(itertools.chain(self.default_validators, self._validators))
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
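    # Example of the optgroup handling above, with illustrative choices:
    #   choices = [
    #       ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
    #       ('unknown', 'Unknown'),
    #   ]
    # validate() accepts 'vinyl', 'cd' and 'unknown'; any other non-empty
    # value raises ValidationError with code 'invalid_choice'.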
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_check(self, connection):
"""
Return the database column check constraint for this field, for the
provided connection. Works the same way as db_type() for the case that
get_internal_type() does not map to a preexisting model field.
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
return None
def db_type(self, connection):
"""
Return the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
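    # Example: with SQLite's data_types mapping, 'CharField' maps to
    # 'varchar(%(max_length)s)', so CharField(max_length=100) yields the
    # column type 'varchar(100)' once DictWrapper supplies max_length.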
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. For example, this method is called by ForeignKey and OneToOneField
to determine its data type.
"""
return self.db_type(connection)
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
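    # The from_db_value hook picked up above is optional. A sketch of a
    # hypothetical subclass using it (not a field defined in this module):
    #   class LowercaseCharField(CharField):
    #       def from_db_value(self, value, expression, connection, context):
    #           return value if value is None else value.lower()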
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, private_only=False, virtual_only=NOT_PROVIDED):
"""
Register the field with the model class it belongs to.
If private_only is True, a separate instance of this field will be
created for every subclass of cls, even if cls is not an abstract
model.
"""
if virtual_only is not NOT_PROVIDED:
warnings.warn(
"The `virtual_only` argument of Field.contribute_to_class() "
"has been renamed to `private_only`.",
RemovedInDjango20Warning, stacklevel=2
)
private_only = virtual_only
self.set_attributes_from_name(name)
self.model = cls
if private_only:
cls._meta.add_field(self, private=True)
else:
cls._meta.add_field(self)
if self.column:
# Don't override classmethods with the descriptor. This means that
# if you have a classmethod and a field with the same name, then
# such fields can't be deferred (we don't have a check for this).
if not getattr(cls, self.attname, None):
setattr(cls, self.attname, DeferredAttribute(self.attname, cls))
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_filter_kwargs_for_object(self, obj):
"""
        Return a dict that, when passed as kwargs to self.model.filter(), would
yield all instances having the same value for this field as obj has.
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of get_db_prep_save().
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
return self._get_default()
@cached_property
def _get_default(self):
if self.has_default():
if callable(self.default):
return self.default
return lambda: self.default
        if (not self.empty_strings_allowed or
                self.null and not connection.features.interprets_empty_strings_as_nulls):
return return_None
return six.text_type # returns empty string
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
if not named_groups:
for choice, __ in choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
if hasattr(self.remote_field, 'get_related_field'):
lst = [(getattr(x, self.remote_field.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
return first_choice + lst
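    # Example: for a field with choices=[('S', 'Small'), ('L', 'Large')] and
    # include_blank=True, this returns
    #   [('', '---------'), ('S', 'Small'), ('L', 'Large')]
    # because BLANK_CHOICE_DASH is [("", "---------")].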
@warn_about_renamed_method(
'Field', '_get_val_from_obj', 'value_from_object',
RemovedInDjango20Warning
)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return force_text(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial', 'disabled'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(AutoField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(AutoField, self).check(**kwargs)
errors.extend(self._check_primary_key())
return errors
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(AutoField, self).deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
value = super(AutoField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.auto_field, "A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BigAutoField(AutoField):
description = _("Big (8 byte) integer")
def get_internal_type(self):
return "BigAutoField"
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(BooleanField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(BooleanField, self).check(**kwargs)
errors.extend(self._check_null(**kwargs))
return errors
def _check_null(self, **kwargs):
if getattr(self, 'null', False):
return [
checks.Error(
'BooleanFields do not accept null values.',
hint='Use a NullBooleanField instead.',
obj=self,
id='fields.E110',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(BooleanField, self).deconstruct()
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_value(self, value):
value = super(BooleanField, self).get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_max_length_attribute(**kwargs))
return errors
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
obj=self,
id='fields.E120',
)
]
elif not isinstance(self.max_length, six.integer_types) or self.max_length <= 0:
return [
checks.Error(
"'max_length' must be a positive integer.",
obj=self,
id='fields.E121',
)
]
else:
return []
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return force_text(value)
def get_prep_value(self, value):
value = super(CharField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
# TODO: Handle multiple backends with different feature flags.
if self.null and not connection.features.interprets_empty_strings_as_nulls:
defaults['empty_value'] = None
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
system_check_deprecated_details = {
'msg': (
'CommaSeparatedIntegerField has been deprecated. Support '
'for it (except in historical migrations) will be removed '
'in Django 2.0.'
),
'hint': (
'Use CharField(validators=[validate_comma_separated_integer_list]) instead.'
),
'id': 'fields.W901',
}
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
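# Example of the replacement suggested in the deprecation hint above
# (hypothetical model definition):
#   class Order(models.Model):
#       codes = models.CharField(
#           max_length=100,
#           validators=[validators.validate_comma_separated_integer_list],
#       )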
class DateTimeCheckMixin(object):
def check(self, **kwargs):
errors = super(DateTimeCheckMixin, self).check(**kwargs)
errors.extend(self._check_mutually_exclusive_options())
errors.extend(self._check_fix_default_value())
return errors
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()]
enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(DateField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(DateField, self).deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super(DateField, self).contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(
cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
)
setattr(
cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
)
def get_prep_value(self, value):
value = super(DateField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
def get_prep_value(self, value):
value = super(DateTimeField, self).get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datetimefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(DecimalField, self).check(**kwargs)
digits_errors = self._check_decimal_places()
digits_errors.extend(self._check_max_digits())
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=self,
id='fields.E134',
)
]
return []
@cached_property
def validators(self):
return super(DecimalField, self).validators + [
validators.DecimalValidator(self.max_digits, self.decimal_places)
]
def deconstruct(self):
name, path, args, kwargs = super(DecimalField, self).deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types):
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.utils.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import utils
return utils.format_number(value, self.max_digits, self.decimal_places)
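    # Example: DecimalField(max_digits=5, decimal_places=2).format_number(
    # decimal.Decimal('3.14159')) returns '3.14', quantized to two places.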
def get_db_prep_save(self, value, connection):
return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super(DecimalField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class DurationField(Field):
"""Stores timedelta objects.
Uses interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
of microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"[DD] [HH:[MM:]]ss[.uuuuuu] format.")
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
# Discard any fractional microseconds due to floating point arithmetic.
return int(round(value.total_seconds() * 1000000))
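    # Example: datetime.timedelta(days=1, seconds=1) is stored as
    # 86401000000 microseconds on backends without a native duration type.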
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super(DurationField, self).get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else duration_string(val)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.DurationField,
}
defaults.update(kwargs)
return super(DurationField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 254)
super(EmailField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(EmailField, self).deconstruct()
# We do not exclude max_length if it matches default as we want to change
        # the default in the future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FilePathField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FilePathField, self).check(**kwargs)
errors.extend(self._check_allowing_files_or_folders(**kwargs))
return errors
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(FilePathField, self).deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super(FloatField, self).get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
errors = super(IntegerField, self).check(**kwargs)
errors.extend(self._check_max_length_warning())
return errors
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with IntegerField",
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
validators_ = super(IntegerField, self).validators
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None:
for validator in validators_:
if isinstance(validator, validators.MinValueValidator) and validator.limit_value >= min_value:
break
else:
validators_.append(validators.MinValueValidator(min_value))
if max_value is not None:
for validator in validators_:
if isinstance(validator, validators.MaxValueValidator) and validator.limit_value <= max_value:
break
else:
validators_.append(validators.MaxValueValidator(max_value))
return validators_
def get_prep_value(self, value):
value = super(IntegerField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_removed_details = {
'msg': (
'IPAddressField has been removed except for support in '
'historical migrations.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.E900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super(IPAddressField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IPAddressField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
**kwargs)
def check(self, **kwargs):
errors = super(GenericIPAddressField, self).check(**kwargs)
errors.extend(self._check_blank_and_null_values(**kwargs))
return errors
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
'GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.',
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length") == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value is None:
return None
if not isinstance(value, six.string_types):
value = force_text(value)
value = value.strip()
if ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super(GenericIPAddressField, self).get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super(NullBooleanField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_value(self, value):
value = super(NullBooleanField, self).get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {'form_class': forms.NullBooleanField}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerRelDbTypeMixin(object):
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. In most cases, a foreign key pointing to a positive integer
primary key will have an integer column data type but some databases
(e.g. MySQL) have an unsigned integer type. In that case
(related_fields_match_type=True), the primary key should return its
db_type.
"""
if connection.features.related_fields_match_type:
return self.db_type(connection)
else:
return IntegerField().db_type(connection=connection)
class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
self.allow_unicode = kwargs.pop('allow_unicode', False)
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super(SlugField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(SlugField, self).deconstruct()
if kwargs.get("max_length") == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
if self.allow_unicode is not False:
kwargs['allow_unicode'] = self.allow_unicode
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return force_text(value)
def get_prep_value(self, value):
value = super(TextField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(TimeField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
time or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(TimeField, self).deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
value = super(TimeField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_timefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
super(URLField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(URLField, self).deconstruct()
if kwargs.get("max_length") == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super(BinaryField, self).deconstruct()
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_placeholder(self, value, compiler, connection):
return connection.ops.binary_placeholder_sql(value)
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self.value_from_object(obj))).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
class UUIDField(Field):
default_error_messages = {
'invalid': _("'%(value)s' is not a valid UUID."),
}
description = 'Universally unique identifier'
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(UUIDField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
value = self.to_python(value)
if connection.features.has_native_uuid_field:
return value
return value.hex
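    # Example: uuid.UUID('12345678-1234-5678-1234-567812345678') is stored
    # as the 32-character hex string '12345678123456781234567812345678' on
    # backends without a native UUID column type.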
def to_python(self, value):
if value is not None and not isinstance(value, uuid.UUID):
try:
return uuid.UUID(value)
except (AttributeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
defaults = {
'form_class': forms.UUIDField,
}
defaults.update(kwargs)
return super(UUIDField, self).formfield(**defaults)
| apache-2.0 |
buzz2vatsal/Deep-Bench | YOLO/retrain_yolo.py | 1 | 12522 | """
This is a script that can be used to retrain the YOLOv2 model for your own dataset.
"""
import argparse
import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from yolo.models.keras_yolo import (preprocess_true_boxes, yolo_body,
yolo_eval, yolo_head, yolo_loss)
from yolo.utils.draw_boxes import draw_boxes
# Args
argparser = argparse.ArgumentParser(
description="Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.")
argparser.add_argument(
'-d',
'--data_path',
help="path to numpy data file (.npz) containing np.object array 'boxes' and np.uint8 array 'images'",
default=os.path.join('..', 'DATA', 'underwater_data.npz'))
argparser.add_argument(
'-a',
'--anchors_path',
help='path to anchors file, defaults to yolo_anchors.txt',
default=os.path.join('model_data', 'yolo_anchors.txt'))
argparser.add_argument(
'-c',
'--classes_path',
help='path to classes file, defaults to pascal_classes.txt',
default=os.path.join('..', 'DATA', 'underwater_classes.txt'))
# Default anchor boxes
YOLO_ANCHORS = np.array(
((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
(7.88282, 3.52778), (9.77052, 9.16828)))
def _main(args):
data_path = os.path.expanduser(args.data_path)
classes_path = os.path.expanduser(args.classes_path)
anchors_path = os.path.expanduser(args.anchors_path)
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
data = np.load(data_path) # custom data saved as a numpy file.
# has 2 arrays: an object array 'boxes' (variable length of boxes in each image)
# and an array of images 'images'
image_data, boxes = process_data(data['images'], data['boxes'])
anchors = YOLO_ANCHORS
detectors_mask, matching_true_boxes = get_detector_mask(boxes, anchors)
model_body, model = create_model(anchors, class_names)
train(
model,
class_names,
anchors,
image_data,
boxes,
detectors_mask,
matching_true_boxes
)
draw(model_body,
class_names,
anchors,
image_data,
image_set='val', # assumes training/validation split is 0.9
weights_name='trained_stage_3_best.h5',
save_all=False)
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
if os.path.isfile(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
else:
Warning("Could not open anchors file, using default.")
return YOLO_ANCHORS
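# A hedged example of the file format get_anchors() expects: a single
# comma-separated line of width,height pairs, e.g.
#   0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434
# which reshape(-1, 2) turns into three (w, h) anchor boxes.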
def process_data(images, boxes=None):
'''processes the data'''
images = [PIL.Image.fromarray(i) for i in images]
orig_size = np.array([images[0].width, images[0].height])
orig_size = np.expand_dims(orig_size, axis=0)
# Image preprocessing.
processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images]
    processed_images = [np.array(image, dtype=float) for image in processed_images]
processed_images = [image/255. for image in processed_images]
if boxes is not None:
# Box preprocessing.
# Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
boxes = [box.reshape((-1, 5)) for box in boxes]
        # Get extents as y_min, x_min, y_max, x_max, class for comparison with
# model output.
boxes_extents = [box[:, [2, 1, 4, 3, 0]] for box in boxes]
# Get box parameters as x_center, y_center, box_width, box_height, class.
boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]
boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]
boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
# find the max number of boxes
max_boxes = 0
for boxz in boxes:
if boxz.shape[0] > max_boxes:
max_boxes = boxz.shape[0]
# add zero pad for training
for i, boxz in enumerate(boxes):
if boxz.shape[0] < max_boxes:
zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
boxes[i] = np.vstack((boxz, zero_padding))
return np.array(processed_images), np.array(boxes)
else:
return np.array(processed_images)
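# A hedged note on the shapes returned above: image_data is (N, 416, 416, 3)
# with values scaled to [0, 1], and boxes is (N, max_boxes, 5) ordered as
# (x_center, y_center, width, height, class), normalized by the original
# image size and zero-padded per image.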
def get_detector_mask(boxes, anchors):
'''
Precompute detectors_mask and matching_true_boxes for training.
Detectors mask is 1 for each spatial position in the final conv layer and
anchor that should be active for the given boxes and 0 otherwise.
Matching true boxes gives the regression targets for the ground truth box
that caused a detector to be active or 0 otherwise.
'''
detectors_mask = [0 for i in range(len(boxes))]
matching_true_boxes = [0 for i in range(len(boxes))]
for i, box in enumerate(boxes):
detectors_mask[i], matching_true_boxes[i] = preprocess_true_boxes(box, anchors, [416, 416])
return np.array(detectors_mask), np.array(matching_true_boxes)
def create_model(anchors, class_names, load_pretrained=True, freeze_body=True):
'''
returns the body of the model and the model
# Params:
load_pretrained: whether or not to load the pretrained model or initialize all weights
freeze_body: whether or not to freeze all weights except for the last layer's
# Returns:
model_body: YOLOv2 with new output layer
model: YOLOv2 with custom loss Lambda layer
'''
detectors_mask_shape = (13, 13, 5, 1)
matching_boxes_shape = (13, 13, 5, 5)
# Create model input layers.
image_input = Input(shape=(416, 416, 3))
boxes_input = Input(shape=(None, 5))
detectors_mask_input = Input(shape=detectors_mask_shape)
matching_boxes_input = Input(shape=matching_boxes_shape)
# Create model body.
yolo_model = yolo_body(image_input, len(anchors), len(class_names))
topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)
if load_pretrained:
# Save topless yolo:
topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')
if not os.path.exists(topless_yolo_path):
print("CREATING TOPLESS WEIGHTS FILE")
yolo_path = os.path.join('model_data', 'yolo.h5')
model_body = load_model(yolo_path)
model_body = Model(model_body.inputs, model_body.layers[-2].output)
model_body.save_weights(topless_yolo_path)
topless_yolo.load_weights(topless_yolo_path)
if freeze_body:
for layer in topless_yolo.layers:
layer.trainable = False
final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear')(topless_yolo.output)
model_body = Model(image_input, final_layer)
# Place model loss on CPU to reduce GPU memory usage.
with tf.device('/cpu:0'):
# TODO: Replace Lambda with custom Keras layer for loss.
model_loss = Lambda(
yolo_loss,
output_shape=(1, ),
name='yolo_loss',
arguments={'anchors': anchors,
'num_classes': len(class_names)})([
model_body.output, boxes_input,
detectors_mask_input, matching_boxes_input
])
model = Model(
[model_body.input, boxes_input, detectors_mask_input,
matching_boxes_input], model_loss)
return model_body, model
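# A hedged note on the output shape: the final 1x1 convolution yields a
# (13, 13, num_anchors * (5 + num_classes)) tensor, i.e. for every grid cell
# and anchor: 4 box parameters, 1 objectness score, and the class scores.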
def train(model, class_names, anchors, image_data, boxes, detectors_mask, matching_true_boxes, validation_split=0.1):
'''
retrain/fine-tune the model
logs training with tensorboard
saves training weights in current directory
best weights according to val_loss is saved as trained_stage_3_best.h5
'''
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
}) # This is a hack to use the custom loss function in the last layer.
logging = TensorBoard()
checkpoint = ModelCheckpoint("trained_stage_3_best.h5", monitor='val_loss',
save_weights_only=True, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')
model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
np.zeros(len(image_data)),
validation_split=validation_split,
batch_size=32,
epochs=5,
callbacks=[logging])
model.save_weights('trained_stage_1.h5')
model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)
model.load_weights('trained_stage_1.h5')
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
}) # This is a hack to use the custom loss function in the last layer.
model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
np.zeros(len(image_data)),
validation_split=0.1,
batch_size=8,
epochs=30,
callbacks=[logging])
model.save_weights('trained_stage_2.h5')
model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
np.zeros(len(image_data)),
validation_split=0.1,
batch_size=8,
epochs=30,
callbacks=[logging, checkpoint, early_stopping])
model.save_weights('trained_stage_3.h5')
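# A hedged summary of the three stages above: 5 warm-up epochs with the
# pretrained body frozen, then two 30-epoch runs on a fully trainable model;
# only the last run uses checkpointing and early stopping, which is what
# produces trained_stage_3_best.h5.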
def draw(model_body, class_names, anchors, image_data, image_set='val',
weights_name='trained_stage_3_best.h5', out_path="output_images", save_all=True):
'''
Draw bounding boxes on image data
'''
if image_set == 'train':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[:int(len(image_data)*.9)]])
elif image_set == 'val':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[int(len(image_data)*.9):]])
elif image_set == 'all':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data])
else:
ValueError("draw argument image_set must be 'train', 'val', or 'all'")
# model.load_weights(weights_name)
print(image_data.shape)
model_body.load_weights(weights_name)
# Create output variables for prediction.
yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs, input_image_shape, score_threshold=0.07, iou_threshold=0)
# Run prediction on overfit image.
sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
if not os.path.exists(out_path):
os.makedirs(out_path)
for i in range(len(image_data)):
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
model_body.input: image_data[i],
input_image_shape: [image_data.shape[2], image_data.shape[3]],
K.learning_phase(): 0
})
print('Found {} boxes for image.'.format(len(out_boxes)))
print(out_boxes)
# Plot image with predicted boxes.
image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
class_names, out_scores)
# Save the image:
if save_all or (len(out_boxes) > 0):
image = PIL.Image.fromarray(image_with_boxes)
image.save(os.path.join(out_path,str(i)+'.png'))
# To display (pauses the program):
# plt.imshow(image_with_boxes, interpolation='nearest')
# plt.show()
if __name__ == '__main__':
args = argparser.parse_args()
_main(args)
| cc0-1.0 |
lokirius/python-for-android | python-build/python-libs/gdata/src/gdata/oauth/rsa.py | 225 | 4528 | #!/usr/bin/python
"""
requires tlslite - http://trevp.net/tlslite/
"""
import base64
import binascii
from gdata.tlslite.utils import keyfactory
from gdata.tlslite.utils import cryptomath
# XXX andy: ugly local import due to module name, oauth.oauth
import gdata.oauth as oauth
class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod):
def get_name(self):
return "RSA-SHA1"
def _fetch_public_cert(self, oauth_request):
# not implemented yet, ideas are:
# (1) do a lookup in a table of trusted certs keyed off of consumer
# (2) fetch via http using a url provided by the requester
# (3) some sort of specific discovery code based on request
#
# either way should return a string representation of the certificate
raise NotImplementedError
def _fetch_private_cert(self, oauth_request):
# not implemented yet, ideas are:
# (1) do a lookup in a table of trusted certs keyed off of consumer
#
# either way should return a string representation of the certificate
raise NotImplementedError
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
oauth.escape(oauth_request.get_normalized_http_method()),
oauth.escape(oauth_request.get_normalized_http_url()),
oauth.escape(oauth_request.get_normalized_parameters()),
)
key = ''
raw = '&'.join(sig)
return key, raw
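  # A hedged worked example (hypothetical URL): for a GET request to
  # https://api.example.com/path with parameter a=b, the raw base string is
  #   GET&https%3A%2F%2Fapi.example.com%2Fpath&a%3Db
  # RSA-SHA1 signs this string with the private key, so `key` stays empty.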
def build_signature(self, oauth_request, consumer, token):
key, base_string = self.build_signature_base_string(oauth_request,
consumer,
token)
# Fetch the private key cert based on the request
cert = self._fetch_private_cert(oauth_request)
# Pull the private key from the certificate
privatekey = keyfactory.parsePrivateKey(cert)
# Convert base_string to bytes
#base_string_bytes = cryptomath.createByteArraySequence(base_string)
# Sign using the key
signed = privatekey.hashAndSign(base_string)
return binascii.b2a_base64(signed)[:-1]
def check_signature(self, oauth_request, consumer, token, signature):
    decoded_sig = base64.b64decode(signature)
key, base_string = self.build_signature_base_string(oauth_request,
consumer,
token)
# Fetch the public key cert based on the request
cert = self._fetch_public_cert(oauth_request)
# Pull the public key from the certificate
publickey = keyfactory.parsePEMKey(cert, public=True)
# Check the signature
ok = publickey.hashAndVerify(decoded_sig, base_string)
return ok
class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1):
def _fetch_public_cert(self, oauth_request):
cert = """
-----BEGIN CERTIFICATE-----
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
-----END CERTIFICATE-----
"""
return cert
def _fetch_private_cert(self, oauth_request):
cert = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----
"""
return cert
| apache-2.0 |
LuckDragon82/demo | boilerplate/external/linkedin/linkedin.py | 11 | 35307 | # -*- coding: utf-8 -*-
#######################################################################################
# Python implementation of LinkedIn OAuth Authorization, Profile and Connection APIs. #
# #
# Author: Ozgur Vatansever #
# Email : ozgurvt@gmail.com #
# LinkedIn Account: http://www.linkedin.com/in/ozgurvt #
#######################################################################################
__version__ = "1.8.1"
"""
Provides a Pure Python LinkedIn API Interface.
"""
try:
import sha
except DeprecationWarning, derr:
import hashlib
sha = hashlib.sha1
import urllib, time, random, httplib, hmac, binascii, cgi, string
from HTMLParser import HTMLParser
from model import *
class OAuthError(Exception):
"""
General OAuth exception, nothing special.
"""
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class Stripper(HTMLParser):
"""
Stripper class that strips HTML entity.
"""
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def getAlteredData(self):
return ''.join(self.fed)
class XMLBuilder(object):
def __init__(self, rootTagName):
self.document = minidom.Document()
self.root = self.document.createElement(rootTagName)
self.document.appendChild(self.root)
def xml(self):
return self.document.toxml()
def __unicode__(self):
return self.document.toprettyxml()
def append_element_to_root(self, element):
self.root.appendChild(element)
def append_list_of_elements_to_element(self, element, elements):
map(lambda x:element.appendChild(x),elements)
return element
def create_element(self, tag_name):
return self.document.createElement(str(tag_name))
def create_element_with_text_node(self, tag_name, text_node):
text_node = self.document.createTextNode(str(text_node))
element = self.document.createElement(str(tag_name))
element.appendChild(text_node)
return element
def create_elements(self, **elements):
return [self.create_element_with_text_node(tag_name, text_node) for tag_name, text_node in elements.items()]
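# A hedged usage sketch (hypothetical, not in the original module):
#   b = XMLBuilder("mailbox-item")
#   b.append_element_to_root(b.create_element_with_text_node("subject", "Hi"))
#   b.xml()  # -> '<?xml version="1.0" ?><mailbox-item><subject>Hi</subject></mailbox-item>'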
class ConnectionError(Exception):
pass
class LinkedIn(object):
def __init__(self, api_key, api_secret, callback_url, gae = False):
"""
LinkedIn Base class that simply implements LinkedIn OAuth Authorization and LinkedIn APIs such as Profile, Connection vs.
@ LinkedIn OAuth Authorization
------------------------------
In OAuth terminology, there are 2 tokens that we need in order to have permission to perform an API request.
        Those are request_token and access_token. Thus, this class basically wraps the OAuth methods
        related to getting the request_token and access_token strings.
@ Important Note:
-----------------
HMAC-SHA1 hashing algorithm will be used while encrypting a request body of an HTTP request. Other alternatives
such as 'SHA-1' or 'PLAINTEXT' are ignored.
@Reference for OAuth
--------------------
Please take a look at the link below if you have a basic knowledge of HTTP protocol
- http://developer.linkedin.com/docs/DOC-1008
Please create an application from the link below if you do not have an API key and secret key yet.
- https://www.linkedin.com/secure/developer
@api_key: Your API key
@api_secret: Your API secret key
@callback_url: the return url when the user grants permission to Consumer.
"""
# Credientials
self.API_ENDPOINT = "api.linkedin.com"
self.BASE_URL = "https://%s" % self.API_ENDPOINT
self.VERSION = "1.0"
self._api_key = api_key
self._api_secret = api_secret
self._callback_url = callback_url
self._gae = gae # Is it google app engine
self._request_token = None # that comes later
self._access_token = None # that comes later and later
self._request_token_secret = None
self._access_token_secret = None
self._verifier = None
self._error = None
def request_token(self):
"""
Performs the corresponding API which returns the request token in a query string
The POST Querydict must include the following:
* oauth_callback
* oauth_consumer_key
* oauth_nonce
* oauth_signature_method
* oauth_timestamp
* oauth_version
"""
self.clear()
method = "GET"
relative_url = "/uas/oauth/requestToken"
query_dict = self._query_dict({"oauth_callback" : self._callback_url})
self._calc_signature(self._get_url(relative_url), query_dict, self._request_token_secret, method)
try:
response = self._https_connection(method, relative_url, query_dict)
except ConnectionError:
return False
oauth_problem = self._get_value_from_raw_qs("oauth_problem", response)
if oauth_problem:
self._error = oauth_problem
return False
self._request_token = self._get_value_from_raw_qs("oauth_token", response)
self._request_token_secret = self._get_value_from_raw_qs("oauth_token_secret", response)
return True
def access_token(self, request_token = None, request_token_secret = None, verifier = None):
"""
Performs the corresponding API which returns the access token in a query string
        According to the link (http://developer.linkedin.com/docs/DOC-1008), the POST Querydict must include the following:
* oauth_consumer_key
* oauth_nonce
* oauth_signature_method
* oauth_timestamp
* oauth_token (request token)
* oauth_version
"""
self._request_token = request_token and request_token or self._request_token
self._request_token_secret = request_token_secret and request_token_secret or self._request_token_secret
self._verifier = verifier and verifier or self._verifier
# if there is no request token, fail immediately
if self._request_token is None:
raise OAuthError("There is no Request Token. Please perform 'request_token' method and obtain that token first.")
if self._request_token_secret is None:
raise OAuthError("There is no Request Token Secret. Please perform 'request_token' method and obtain that token first.")
if self._verifier is None:
raise OAuthError("There is no Verifier Key. Please perform 'request_token' method, redirect user to API authorize page and get the _verifier.")
method = "GET"
relative_url = "/uas/oauth/accessToken"
query_dict = self._query_dict({
"oauth_token" : self._request_token,
"oauth_verifier" : self._verifier
})
self._calc_signature(self._get_url(relative_url), query_dict, self._request_token_secret, method)
try:
response = self._https_connection(method, relative_url, query_dict)
except ConnectionError:
return False
oauth_problem = self._get_value_from_raw_qs("oauth_problem", response)
if oauth_problem:
self._error = oauth_problem
return False
self._access_token = self._get_value_from_raw_qs("oauth_token", response)
self._access_token_secret = self._get_value_from_raw_qs("oauth_token_secret", response)
return True
def get_profile(self, member_id = None, url = None, fields=[]):
"""
Gets the public profile for a specific user. It is determined by his/her member id or public url.
        If neither is given, the current user's details are fetched.
The argument 'fields' determines how much information will be fetched.
Examples:
        client.get_profile(member_id = 123, url = None, fields=['first-name', 'last-name']) : fetches the profile of the user whose id is 123.
        client.get_profile(member_id = None, url = None, fields=['first-name', 'last-name']) : fetches the current user's profile.
client.get_profile(member_id = None, 'http://www.linkedin.com/in/ozgurv') : fetches the profile of a user whose profile url is http://www.linkedin.com/in/ozgurv
@ Returns Profile instance
"""
#################
# BEGIN ROUTINE #
#################
# if there is no access token or secret, fail immediately
self._check_tokens()
# specify the url according to the parameters given
raw_url = "/v1/people/"
if url:
url = self._quote(url)
raw_url = (raw_url + "url=%s:public") % url
elif member_id:
raw_url = (raw_url + "id=%s" % member_id)
else:
raw_url = raw_url + "~"
if url is None:
fields = ":(%s)" % ",".join(fields) if len(fields) > 0 else None
if fields:
raw_url = raw_url + fields
try:
response = self._do_normal_query(raw_url)
return Profile.create(response) # this creates Profile instance or gives you null
except ConnectionError:
return None
def get_connections(self, member_id = None, public_url = None, fields=[]):
"""
Fetches the connections of a user whose id is the given member_id or url is the given public_url
If none of the parameters given, the connections of the current user are fetched.
@Returns: a list of Profile instances or an empty list if there is no connection.
Example urls:
* http://api.linkedin.com/v1/people/~/connections (for current user)
* http://api.linkedin.com/v1/people/id=12345/connections (fetch with member_id)
* http://api.linkedin.com/v1/people/url=http%3A%2F%2Fwww.linkedin.com%2Fin%2Flbeebe/connections (fetch with public_url)
"""
self._check_tokens()
raw_url = "/v1/people/%s/connections"
if member_id:
raw_url = raw_url % ("id=" + member_id)
elif public_url:
raw_url = raw_url % ("url=" + self._quote(public_url))
else:
raw_url = raw_url % "~"
fields = ":(%s)" % ",".join(fields) if len(fields) > 0 else None
if fields:
raw_url = raw_url + fields
try:
response = self._do_normal_query(raw_url)
document = minidom.parseString(response)
connections = document.getElementsByTagName("person")
result = []
for connection in connections:
profile = Profile.create(connection.toxml())
if profile is not None:
result.append(profile)
return result
except ConnectionError:
return None
def get_search(self, parameters):
"""
Use the Search API to find LinkedIn profiles using keywords,
company, name, or other methods. This returns search results,
which are an array of matching member profiles. Each matching
profile is similar to a mini-profile popup view of LinkedIn
member profiles.
Request URL Structure:
        http://api.linkedin.com/v1/people?keywords=['+' delimited keywords]&name=[first name + last name]&company=[company name]&current-company=[true|false]&title=[title]&current-title=[true|false]&industry-code=[industry code]&search-location-type=[I,Y]&country-code=[country code]&postal-code=[postal code]&network=[in|out]&start=[number]&count=[1-10]&sort-criteria=[ctx|endorsers|distance|relevance]
"""
self._check_tokens()
try:
response = self._do_normal_query("/v1/people", method="GET", params=parameters)
except ConnectionError:
return None
error = self._parse_error(response)
if error:
self._error = error
return None
document = minidom.parseString(response)
connections = document.getElementsByTagName("person")
result = []
for connection in connections:
profile = Profile.create(connection.toxml())
if profile is not None:
result.append(profile)
return result
def send_message(self, subject, message, ids = [], send_yourself = False):
"""
Sends a Non-HTML message and subject to the members whose IDs are given as a parameter 'ids'.
If the user gives more than 10 ids, the IDs after 10th ID are ignored.
@Input: string x string x list x bool
@Output: bool
Returns True if successfully sends the message otherwise returns False.
        Important Note: You can send a message to at most 10 connections at one time.
Technical Explanation:
---------------------
Sends a POST request to the URL 'http://api.linkedin.com/v1/people/~/mailbox'.
The XML that will be sent looks like this:
        <?xml version='1.0' encoding='UTF-8'?>
<mailbox-item>
<recipients>
<recipient>
<person path='/people/{id}' />
</recipient>
</recipients>
<subject>{subject}</subject>
<body>{message}</body>
</mailbox-item>
The resulting XML would be like this:
        If the result is None or '', the message was sent successfully. If an error occurs, you get the following:
        <?xml version='1.0' encoding='UTF-8' standalone='yes'?>
<error>
<status>...</status>
<timestamp>...</timestamp>
<error-code>...</error-code>
<message>...</message>
</error>
"""
#######################################################################################
# What we do here is first we need to shorten to ID list to 10 elements just in case. #
# Then we need to strip HTML tags using HTMLParser library. #
# Then we are going to build up the XML body and post the request. #
# According to the response parsed, we return True or False. #
#######################################################################################
self._check_tokens()
# Shorten the list.
ids = ids[:10]
if send_yourself:
ids = ids[:9]
ids.append("~")
subjectStripper = Stripper()
subjectStripper.feed(subject)
subject = subjectStripper.getAlteredData()
bodyStripper = Stripper()
bodyStripper.feed(message)
body = bodyStripper.getAlteredData()
# Build up the POST body.
builder = XMLBuilder("mailbox-item")
recipients_element = builder.create_element("recipients")
subject_element = builder.create_element_with_text_node("subject", subject)
body_element = builder.create_element_with_text_node("body", body)
for member_id in ids:
recipient_element = builder.create_element("recipient")
person_element = builder.create_element("person")
person_element.setAttribute("path", "/people/%s" % member_id)
recipient_element.appendChild(person_element)
recipients_element.appendChild(recipient_element)
builder.append_element_to_root(recipients_element)
builder.append_element_to_root(subject_element)
builder.append_element_to_root(body_element)
body = builder.xml()
try:
response = self._do_normal_query("/v1/people/~/mailbox", body=body, method="POST")
            # If the API server sends us a response, we know that an error occurred,
            # so we have to parse the response to find out what caused the error
            # and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def send_invitation(self, subject, message, first_name, last_name, email):
"""
Sends an invitation to the given email address.
        This method is very similar to the 'send_message' method.
@input: string x string x string x string x string
@output: bool
"""
#########################################################################################
# What we do here is first, we need to check the access token. #
# Then we need to strip HTML tags using the HTMLParser library. #
# Then we are going to build up the XML body and post the request. #
# According to the response parsed, we return True or False. #
#########################################################################################
self._check_tokens()
subjectStripper = Stripper()
subjectStripper.feed(subject)
subject = subjectStripper.getAlteredData()
bodyStripper = Stripper()
bodyStripper.feed(message)
body = bodyStripper.getAlteredData()
# Build up the POST body.
builder = XMLBuilder("mailbox-item")
recipients_element = builder.create_element("recipients")
subject_element = builder.create_element_with_text_node("subject", subject)
body_element = builder.create_element_with_text_node("body", body)
recipient_element = builder.create_element("recipient")
person_element = builder.create_element("person")
person_element.setAttribute("path", "/people/email=%s" % email)
first_name_element = builder.create_element_with_text_node("first-name", first_name)
last_name_element = builder.create_element_with_text_node("last-name", last_name)
builder.append_list_of_elements_to_element(person_element, [first_name_element, last_name_element])
recipient_element.appendChild(person_element)
recipients_element.appendChild(recipient_element)
item_content_element = builder.create_element("item-content")
invitation_request_element = builder.create_element("invitation-request")
connect_type_element = builder.create_element_with_text_node("connect-type", "friend")
invitation_request_element.appendChild(connect_type_element)
item_content_element.appendChild(invitation_request_element)
builder.append_element_to_root(recipients_element)
builder.append_element_to_root(subject_element)
builder.append_element_to_root(body_element)
builder.append_element_to_root(item_content_element)
body = builder.xml()
try:
response = self._do_normal_query("/v1/people/~/mailbox", body=body, method="POST")
            # If the API server sends us a response, we know that an error occurred,
            # so we have to parse the response to find out what caused the error
            # and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def set_status(self, status_message):
"""
        This API is deprecated and should be replaced with the LinkedIn share API (see share_update).
Issues a status of the connected user. There is a 140 character limit on status message.
If it is longer than 140 characters, it is shortened.
-----------
Usage Rules
* We must use an access token from the user.
* We can only set status for the user who grants us access.
-----------
@input: string
@output: bool
"""
self._check_tokens()
# Shorten the message just in case.
status_message = str(status_message)
if len(status_message) > 140:
status_message = status_message[:140]
# Build up the XML request
builder = XMLBuilder("current-status")
status_node = builder.document.createTextNode(status_message)
builder.root.appendChild(status_node)
body = builder.xml()
try:
response = self._do_normal_query("/v1/people/~/current-status", body=body, method="PUT")
            # If the API server sends us a response, we know that an error occurred,
            # so we have to parse the response to find out what caused the error
            # and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def clear_status(self):
"""
        This API is deprecated and should be replaced with the LinkedIn share API (see share_update).
Clears the status of the connected user.
-----------
Usage Rules
* We must use an access token from the user.
* We can only set status for the user who grants us access.
-----------
@input: none
@output: bool
"""
self._check_tokens()
try:
response = self._do_normal_query("/v1/people/~/current-status", method="DELETE")
            # If the API server sends us a response, we know that an error occurred,
            # so we have to parse the response to find out what caused the error
            # and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def share_update(self, comment=None, title=None, submitted_url=None,
submitted_image_url=None, description=None,
visibility="connections-only"):
"""
Use the Share API to have a member share content with their network or with all of LinkedIn
-----------
Usage Rules
* We must use an access token from the user.
* We can only share items for the user who grants us access.
-----------
visibility: anyone or connections-only
@output: bool
SAMPLE
<?xml VERSION="1.0" encoding="UTF-8"?>
<share>
<comment>83% of employers will use social media to hire: 78% LinkedIn, 55% Facebook, 45% Twitter [SF Biz Times] http://bit.ly/cCpeOD</comment>
<content>
<title>Survey: Social networks top hiring tool - San Francisco Business Times</title>
<submitted-url>http://sanfrancisco.bizjournals.com/sanfrancisco/stories/2010/06/28/daily34.html</submitted-url>
<submitted-image-url>http://images.bizjournals.com/travel/cityscapes/thumbs/sm_sanfrancisco.jpg</submitted-image-url>
</content>
<visibility>
<code>anyone</code>
</visibility>
</share>
"""
self._check_tokens()
if comment is not None:
comment = str(comment)
if len(comment) > 700:
comment = comment[:700]
if title is not None:
title = str(title)
if len(title) > 200:
title = title[:200]
if description is not None:
description = str(description)
if len(description) > 400:
description = description[:400]
# Build up the XML request
builder = XMLBuilder("share")
        if comment is not None and len(comment) > 0:
comment_element = builder.create_element_with_text_node("comment", comment)
builder.append_element_to_root(comment_element)
if (submitted_url is not None) or (title is not None):
content_element = builder.create_element("content")
if submitted_url is not None:
submitted_url_element = builder.create_element_with_text_node("submitted-url", submitted_url)
content_element.appendChild(submitted_url_element)
                # must have a url to include an image url
if submitted_image_url is not None:
submitted_image_url_element = builder.create_element_with_text_node("submitted-image-url", submitted_image_url)
content_element.appendChild(submitted_image_url_element)
if title is not None:
title_element = builder.create_element_with_text_node("title", title)
content_element.appendChild(title_element)
if description is not None:
description_element = builder.create_element_with_text_node("description", description)
content_element.appendChild(description_element)
builder.append_element_to_root(content_element)
visibility_element = builder.create_element("visibility")
code_element = builder.create_element_with_text_node("code", visibility)
visibility_element.appendChild(code_element)
builder.append_element_to_root(visibility_element)
body = builder.xml()
try:
response = self._do_normal_query("/v1/people/~/shares", body=body, method="POST")
            # If the API server sends us a response, we know that an error occurred,
            # so we have to parse the response to find out what caused the error
            # and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def get_authorize_url(self, request_token = None):
self._request_token = request_token and request_token or self._request_token
if self._request_token is None:
raise OAuthError("OAuth Request Token is NULL. Plase acquire this first.")
return "%s%s?oauth_token=%s" % (self.BASE_URL, "/uas/oauth/authorize", self._request_token)
def get_error(self):
return self._error
def clear(self):
self._request_token = None
self._access_token = None
self._verifier = None
self._request_token_secret = None
self._access_token_secret = None
self._error = None
#################################################
# HELPER FUNCTIONS #
# You do not explicitly use those methods below #
#################################################
def _generate_nonce(self, length = 20):
return ''.join([string.letters[random.randint(0, len(string.letters) - 1)] for i in range(length)])
def _get_url(self, relative_path):
return self.BASE_URL + relative_path
def _generate_timestamp(self):
return str(int(time.time()))
def _quote(self, st):
return urllib.quote(st, safe='~')
def _utf8(self, st):
return isinstance(st, unicode) and st.encode("utf-8") or str(st)
def _urlencode(self, query_dict):
keys_and_values = [(self._quote(self._utf8(k)), self._quote(self._utf8(v))) for k,v in query_dict.items()]
keys_and_values.sort()
return '&'.join(['%s=%s' % (k, v) for k, v in keys_and_values])
def _get_value_from_raw_qs(self, key, qs):
raw_qs = cgi.parse_qs(qs, keep_blank_values = False)
rs = raw_qs.get(key)
if type(rs) == list:
return rs[0]
else:
return rs
def _signature_base_string(self, method, uri, query_dict):
return "&".join([self._quote(method), self._quote(uri), self._quote(self._urlencode(query_dict))])
def _parse_error(self, str_as_xml):
"""
Helper function in order to get error message from an xml string.
In coming xml can be like this:
        <?xml version='1.0' encoding='UTF-8' standalone='yes'?>
<error>
<status>404</status>
<timestamp>1262186271064</timestamp>
<error-code>0000</error-code>
<message>[invalid.property.name]. Couldn't find property with name: first_name</message>
        </error>
"""
try:
xmlDocument = minidom.parseString(str_as_xml)
if len(xmlDocument.getElementsByTagName("error")) > 0:
error = xmlDocument.getElementsByTagName("message")
if error:
error = error[0]
return error.childNodes[0].nodeValue
return None
        except Exception, detail:
# raise detail
raise OAuthError("Invalid XML String given: error: %s" % repr(detail))
def _create_oauth_header(self, query_dict):
header = 'OAuth realm="http://api.linkedin.com", '
header += ", ".join(['%s="%s"' % (k, self._quote(query_dict[k]))
for k in sorted(query_dict)])
return header
def _query_dict(self, additional = {}):
query_dict = {"oauth_consumer_key": self._api_key,
"oauth_nonce": self._generate_nonce(),
"oauth_signature_method": "HMAC-SHA1",
"oauth_timestamp": self._generate_timestamp(),
"oauth_version": self.VERSION
}
query_dict.update(additional)
return query_dict
def _do_normal_query(self, relative_url, body=None, method="GET", params=None):
method = method
query_dict = self._query_dict({"oauth_token" : self._access_token})
signature_dict = dict(query_dict)
if (params):
signature_dict.update(params)
query_dict["oauth_signature"] = self._calc_signature(self._get_url(relative_url),
signature_dict, self._access_token_secret, method, update=False)
if (params):
relative_url = "%s?%s" % (relative_url, self._urlencode(params))
response = self._https_connection(method, relative_url, query_dict, body)
if (response):
error = self._parse_error(response)
if error:
self._error = error
raise ConnectionError()
return response
def _check_tokens(self):
if self._access_token is None:
self._error = "There is no Access Token. Please perform 'access_token' method and obtain that token first."
raise OAuthError(self._error)
if self._access_token_secret is None:
self._error = "There is no Access Token Secret. Please perform 'access_token' method and obtain that token first."
raise OAuthError(self._error)
def _calc_key(self, token_secret):
key = self._quote(self._api_secret) + "&"
if (token_secret):
key += self._quote(token_secret)
return key
def _calc_signature(self, url, query_dict, token_secret, method = "GET", update=True):
if token_secret != None:
token_secret = token_secret.encode('ascii')
query_string = self._quote(self._urlencode(query_dict))
signature_base_string = "&".join([self._quote(method), self._quote(url), query_string])
hashed = hmac.new(self._calc_key(token_secret), signature_base_string, sha)
signature = binascii.b2a_base64(hashed.digest())[:-1]
if (update):
query_dict["oauth_signature"] = signature
return signature
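    # A hedged note on the scheme above: the HMAC-SHA1 key is
    # quote(api_secret) + "&" + quote(token_secret) (the token secret is
    # empty for the initial request_token call), and the signed message is
    # "METHOD&quote(url)&quote(sorted, urlencoded query)".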
def _https_connection(self, method, relative_url, query_dict, body=None):
if (self._gae):
return self._https_connection_gae(method, relative_url, query_dict, body)
else:
return self._https_connection_regular(method, relative_url, query_dict, body)
def _https_connection_regular(self, method, relative_url, query_dict, body = None):
header = self._create_oauth_header(query_dict)
connection = None
try:
connection = httplib.HTTPSConnection(self.API_ENDPOINT)
connection.request(method, relative_url, body = body,
headers={'Authorization':header})
response = connection.getresponse()
if response is None:
self._error = "No HTTP response received."
raise ConnectionError()
return response.read()
finally:
if (connection):
connection.close()
def _https_connection_gae(self, method, relative_url, query_dict, body = None):
from google.appengine.api import urlfetch
if (method == "GET"):
method = urlfetch.GET
elif (method == "POST"):
method = urlfetch.POST
elif (method == "PUT"):
method = urlfetch.PUT
elif (method == "DELETE"):
method = urlfetch.DELETE
header = self._create_oauth_header(query_dict)
headers = {'Authorization':header}
if (body):
headers["Content-Type"] = "text/xml"
url = self._get_url(relative_url)
rpc = urlfetch.create_rpc(deadline=10.0)
urlfetch.make_fetch_call(rpc, url, method=method, headers=headers,
payload=body)
return rpc.get_result().content
########################
# END HELPER FUNCTIONS #
########################
| lgpl-3.0 |
4022321818/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/sax/_exceptions.py | 625 | 4885 | """Different kinds of SAX Exceptions"""
# in brython the 4 lines below cause a $globals['Exception'] error
#import sys
#if sys.platform[:4] == "java":
# from java.lang import Exception
#del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
"""Encapsulate an XML error or warning. This class can contain
basic error or warning information from either the XML parser or
the application: you can subclass it to provide additional
functionality, or to add localization. Note that although you will
receive a SAXException as the argument to the handlers in the
ErrorHandler interface, you are not actually required to raise
the exception; instead, you can simply read the information in
it."""
def __init__(self, msg, exception=None):
"""Creates an exception. The message is required, but the exception
is optional."""
self._msg = msg
self._exception = exception
Exception.__init__(self, msg)
def getMessage(self):
"Return a message for this exception."
return self._msg
def getException(self):
"Return the embedded exception, or None if there was none."
return self._exception
def __str__(self):
"Create a string representation of the exception."
return self._msg
def __getitem__(self, ix):
"""Avoids weird error messages if someone does exception[ix] by
mistake, since Exception has __getitem__ defined."""
raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
"""Encapsulate an XML parse error or warning.
This exception will include information for locating the error in
the original XML document. Note that although the application will
receive a SAXParseException as the argument to the handlers in the
ErrorHandler interface, the application is not actually required
to raise the exception; instead, it can simply read the
information in it and take a different action.
Since this exception is a subclass of SAXException, it inherits
the ability to wrap another exception."""
def __init__(self, msg, exception, locator):
"Creates the exception. The exception parameter is allowed to be None."
SAXException.__init__(self, msg, exception)
self._locator = locator
# We need to cache this stuff at construction time.
# If this exception is raised, the objects through which we must
# traverse to get this information may be deleted by the time
# it gets caught.
self._systemId = self._locator.getSystemId()
self._colnum = self._locator.getColumnNumber()
self._linenum = self._locator.getLineNumber()
def getColumnNumber(self):
"""The column number of the end of the text where the exception
occurred."""
return self._colnum
def getLineNumber(self):
"The line number of the end of the text where the exception occurred."
return self._linenum
def getPublicId(self):
"Get the public identifier of the entity where the exception occurred."
return self._locator.getPublicId()
def getSystemId(self):
"Get the system identifier of the entity where the exception occurred."
return self._systemId
def __str__(self):
"Create a string representation of the exception."
sysid = self.getSystemId()
if sysid is None:
sysid = "<unknown>"
linenum = self.getLineNumber()
if linenum is None:
linenum = "?"
colnum = self.getColumnNumber()
if colnum is None:
colnum = "?"
return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
"""Exception class for an unrecognized identifier.
An XMLReader will raise this exception when it is confronted with an
unrecognized feature or property. SAX applications and extensions may
use this class for similar purposes."""
pass
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
"""Exception class for an unsupported operation.
An XMLReader will raise this exception when a service it cannot
perform is requested (specifically setting a state or value). SAX
applications and extensions may use this class for similar
purposes."""
pass
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXReaderNotAvailable(SAXNotSupportedException):
"""Exception class for a missing driver.
An XMLReader module (driver) should raise this exception when it
is first imported, e.g. when a support module cannot be imported.
It also may be raised during parsing, e.g. if executing an external
program is not permitted."""
pass
| gpl-3.0 |
mach0/QGIS | python/plugins/processing/modeler/OpenModelFromFileAction.py | 33 | 2413 | # -*- coding: utf-8 -*-
"""
***************************************************************************
OpenModelFromFileAction.py
---------------------
Date : February 2018
Copyright : (C) 2018 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'February 2018'
__copyright__ = '(C) 2018, Nyall Dawson'
import os
from qgis.PyQt.QtWidgets import QFileDialog
from qgis.PyQt.QtCore import QFileInfo, QCoreApplication
from qgis.core import QgsApplication, QgsSettings
from qgis.utils import iface
from processing.gui.ToolboxAction import ToolboxAction
from processing.modeler.ModelerDialog import ModelerDialog
pluginPath = os.path.split(os.path.dirname(__file__))[0]
class OpenModelFromFileAction(ToolboxAction):
def __init__(self):
self.name = QCoreApplication.translate('OpenModelFromFileAction', 'Open Existing Model…')
self.group = self.tr('Tools')
def getIcon(self):
return QgsApplication.getThemeIcon("/processingModel.svg")
def execute(self):
settings = QgsSettings()
lastDir = settings.value('Processing/lastModelsDir', '')
filename, selected_filter = QFileDialog.getOpenFileName(self.toolbox,
self.tr('Open Model', 'AddModelFromFileAction'), lastDir,
self.tr('Processing models (*.model3 *.MODEL3)', 'AddModelFromFileAction'))
if filename:
settings.setValue('Processing/lastModelsDir',
QFileInfo(filename).absoluteDir().absolutePath())
dlg = ModelerDialog.create()
dlg.loadModel(filename)
dlg.show()
| gpl-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/numpy/distutils/extension.py | 89 | 2967 | """distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts.
Overridden to support f2py.
"""
from __future__ import division, absolute_import, print_function
import sys
import re
from distutils.extension import Extension as old_Extension
if sys.version_info[0] >= 3:
basestring = str
cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
class Extension(old_Extension):
def __init__ (
self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts=None,
depends=None,
language=None,
f2py_options=None,
module_dirs=None,
extra_f77_compile_args=None,
extra_f90_compile_args=None,):
old_Extension.__init__(
self, name, [],
include_dirs=include_dirs,
define_macros=define_macros,
undef_macros=undef_macros,
library_dirs=library_dirs,
libraries=libraries,
runtime_library_dirs=runtime_library_dirs,
extra_objects=extra_objects,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
export_symbols=export_symbols)
# Avoid assert statements checking that sources contains strings:
self.sources = sources
# Python 2.4 distutils new features
self.swig_opts = swig_opts or []
# swig_opts is assumed to be a list. Here we handle the case where it
# is specified as a string instead.
if isinstance(self.swig_opts, basestring):
import warnings
msg = "swig_opts is specified as a string instead of a list"
warnings.warn(msg, SyntaxWarning, stacklevel=2)
self.swig_opts = self.swig_opts.split()
# Python 2.3 distutils new features
self.depends = depends or []
self.language = language
# numpy_distutils features
self.f2py_options = f2py_options or []
self.module_dirs = module_dirs or []
self.extra_f77_compile_args = extra_f77_compile_args or []
self.extra_f90_compile_args = extra_f90_compile_args or []
return
def has_cxx_sources(self):
for source in self.sources:
if cxx_ext_re(str(source)):
return True
return False
def has_f2py_sources(self):
for source in self.sources:
if fortran_pyf_ext_re(source):
return True
return False
# class Extension
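# A hedged setup.py sketch (package and file names are assumptions) showing
# how this subclass mixes Fortran and f2py sources:
#
#   from numpy.distutils.core import setup
#   from numpy.distutils.extension import Extension
#
#   ext = Extension('mypkg._flib',
#                   sources=['mypkg/flib.pyf', 'mypkg/flib.f'],
#                   f2py_options=['--quiet'])
#   setup(name='mypkg', ext_modules=[ext])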
| bsd-3-clause |
chouseknecht/ansible | lib/ansible/module_utils/network/iosxr/argspec/l2_interfaces/l2_interfaces.py | 21 | 2647 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the iosxr_l2_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class L2_InterfacesArgs(object):
def __init__(self, **kwargs):
pass
argument_spec = {'config': {'elements': 'dict',
'options': {'name': {'type': 'str', 'required': True},
'native_vlan': {'type': 'int'},
'l2transport': {'type': 'bool'},
                                            'l2protocol': {'elements': 'dict',
'type': 'list',
'options': {'cdp': {'type': 'str',
'choices': ['drop', 'forward', 'tunnel']},
'pvst': {'type': 'str',
'choices': ['drop', 'forward', 'tunnel']},
'stp': {'type': 'str',
'choices': ['drop', 'forward', 'tunnel']},
'vtp': {'type': 'str',
'choices': ['drop', 'forward', 'tunnel']},
}},
'q_vlan': {'type': 'list'},
'propagate': {'type': 'bool'}},
'type': 'list'},
'state': {'choices': ['merged', 'replaced', 'overridden', 'deleted'],
'default': 'merged',
'type': 'str'}}
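# A hedged playbook sketch matching this spec (interface name and vlan are
# assumptions):
#
#   - iosxr_l2_interfaces:
#       config:
#         - name: GigabitEthernet0/0/0/1
#           native_vlan: 20
#           l2transport: true
#       state: merged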
| gpl-3.0 |
Twentysix26/26-Cogs | penis/penis.py | 1 | 1042 | import discord
import random
from discord.ext import commands
from cogs.utils.chat_formatting import pagify
class Penis:
"""Penis related commands."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def penis(self, ctx, *users: discord.Member):
"""Detects user's penis length
This is 100% accurate.
Enter multiple users for an accurate comparison!"""
if not users:
await self.bot.send_cmd_help(ctx)
return
dongs = {}
msg = ""
state = random.getstate()
for user in users:
random.seed(user.id)
dongs[user] = "8{}D".format("=" * random.randint(0, 30))
random.setstate(state)
dongs = sorted(dongs.items(), key=lambda x: x[1])
for user, dong in dongs:
msg += "**{}'s size:**\n{}\n".format(user.display_name, dong)
for page in pagify(msg):
await self.bot.say(page)
def setup(bot):
bot.add_cog(Penis(bot))
| gpl-3.0 |
jbair34/moose | framework/contrib/nsiqcppstyle/rules/RULE_6_5_B_do_not_use_macro_for_constants.py | 43 | 2094 | """
Do not use macros for constants.
If a constant is defined by a macro, this rule reports a violation.
Instead, use enum or const variables.
However, it's ok to write a macro function.
And.. If the macro is start with underbar,
it regards this macro is defined for the special purpose
and it doesn't report a violation on it.
== Violation ==
#define KK 1 <== Violation
#define TT "sds" <== Violation
== Good ==
#define KK(A) (A)*3 <== Don't care. It's macro function
const int k = 3; <== OK
const char *t = "EWEE"; <== OK
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, contextStack) :
t = lexer.GetCurToken()
if t.type == "PREPROCESSOR" and t.value.find("define") != -1 :
d = lexer.GetNextTokenSkipWhiteSpaceAndComment()
k2 = lexer.GetNextTokenSkipWhiteSpaceAndComment()
if d.type == "ID" and k2 != None and k2.type in ["NUMBER", "STRING", "CHARACTOR"] and d.lineno == k2.lineno :
if not Search("^_", d.value) :
nsiqcppstyle_reporter.Error(d, __name__,
"Do not use macro(%s) for constant" % d.value)
ruleManager.AddPreprocessRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddPreprocessRule(RunRule)
def test1(self):
self.Analyze("thisfile.c","""
#define k 1
""")
assert CheckErrorContent(__name__)
def test2(self):
self.Analyze("thisfile.c","""
#define tt(A) 3
""")
assert not CheckErrorContent(__name__)
def test3(self):
self.Analyze("thisfile.c","""
# define t "ewew"
""")
assert CheckErrorContent(__name__)
def test4(self):
self.Analyze("thisfile.c","""
# define _t "ewew"
""")
assert not CheckErrorContent(__name__)
| lgpl-2.1 |
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/python/dist3.py | 23 | 8952 | # -*- test-case-name: twisted.python.test.test_dist3 -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for installing Twisted on Python 3.
Only necessary while parts of Twisted are unported.
@var modules: A list of modules that have been ported,
e.g. "twisted.python.versions"; a package name (e.g. "twisted.python")
indicates the corresponding __init__.py file has been ported
(e.g. "twisted/python/__init__.py"). To reduce merge conflicts, add new
lines in alphabetical sort.
@var testModules: A list of test modules that have been ported, e.g
"twisted.python.test.test_versions". To reduce merge conflicts, add new
lines in alphabetical sort.
@var almostModules: A list of any other modules which are needed by any of the
modules in the other two lists, but which themselves have not actually
been properly ported to Python 3. These modules might work well enough to
satisfy some of the requirements of the modules that depend on them, but
cannot be considered generally usable otherwise.
@var modulesToInstall: A list of all modules that should be installed on
Python 3.
"""
from __future__ import division
modules = [
"twisted",
"twisted.copyright",
"twisted.internet",
"twisted.internet.abstract",
"twisted.internet.address",
"twisted.internet.base",
"twisted.internet.default",
"twisted.internet.defer",
"twisted.internet.endpoints",
"twisted.internet.epollreactor",
"twisted.internet.error",
"twisted.internet.interfaces",
"twisted.internet.fdesc",
"twisted.internet.gireactor",
"twisted.internet._glibbase",
"twisted.internet.gtk3reactor",
"twisted.internet.main",
"twisted.internet._newtls",
"twisted.internet.posixbase",
"twisted.internet.protocol",
"twisted.internet.pollreactor",
"twisted.internet.reactor",
"twisted.internet.selectreactor",
"twisted.internet._signals",
"twisted.internet.ssl",
"twisted.internet.task",
"twisted.internet.tcp",
"twisted.internet.test",
"twisted.internet.test.connectionmixins",
"twisted.internet.test.modulehelpers",
"twisted.internet.test._posixifaces",
"twisted.internet.test.reactormixins",
"twisted.internet.threads",
"twisted.internet.udp",
"twisted.internet.utils",
"twisted.names",
"twisted.names.cache",
"twisted.names.client",
"twisted.names.common",
"twisted.names.dns",
"twisted.names.error",
"twisted.names.hosts",
"twisted.names.resolve",
"twisted.names._rfc1982",
"twisted.names.test",
"twisted.names._version",
"twisted.protocols",
"twisted.protocols.basic",
"twisted.protocols.policies",
"twisted.protocols.test",
"twisted.protocols.tls",
"twisted.python",
"twisted.python.compat",
"twisted.python.components",
"twisted.python.constants",
"twisted.python.context",
"twisted.python.deprecate",
"twisted.python.dist3",
"twisted.python.failure",
"twisted.python.filepath",
"twisted.python.lockfile",
"twisted.python.log",
"twisted.python.monkey",
"twisted.python.randbytes",
"twisted.python.reflect",
"twisted.python.runtime",
"twisted.python.test",
"twisted.python.test.deprecatedattributes",
"twisted.python.test.modules_helpers",
"twisted.python.threadable",
"twisted.python.threadpool",
"twisted.python.util",
"twisted.python.versions",
"twisted.test",
"twisted.test.proto_helpers",
"twisted.test.iosim",
"twisted.test.ssl_helpers",
"twisted.trial",
"twisted.trial._asynctest",
"twisted.trial.itrial",
"twisted.trial._synctest",
"twisted.trial.test",
"twisted.trial.test.detests",
"twisted.trial.test.erroneous",
"twisted.trial.test.suppression",
"twisted.trial.test.packages",
"twisted.trial.test.skipping",
"twisted.trial.test.suppression",
"twisted.trial.unittest",
"twisted.trial.util",
"twisted._version",
"twisted.web",
"twisted.web.http_headers",
"twisted.web.resource",
"twisted.web._responses",
"twisted.web.test",
"twisted.web.test.requesthelper",
"twisted.web._version",
]
testModules = [
"twisted.internet.test.test_abstract",
"twisted.internet.test.test_address",
"twisted.internet.test.test_base",
"twisted.internet.test.test_core",
"twisted.internet.test.test_default",
"twisted.internet.test.test_endpoints",
"twisted.internet.test.test_epollreactor",
"twisted.internet.test.test_fdset",
"twisted.internet.test.test_filedescriptor",
"twisted.internet.test.test_inlinecb",
"twisted.internet.test.test_gireactor",
"twisted.internet.test.test_glibbase",
"twisted.internet.test.test_main",
"twisted.internet.test.test_newtls",
"twisted.internet.test.test_posixbase",
"twisted.internet.test.test_protocol",
"twisted.internet.test.test_sigchld",
"twisted.internet.test.test_tcp",
"twisted.internet.test.test_threads",
"twisted.internet.test.test_tls",
"twisted.internet.test.test_udp",
"twisted.internet.test.test_udp_internals",
"twisted.names.test.test_cache",
"twisted.names.test.test_client",
"twisted.names.test.test_common",
"twisted.names.test.test_dns",
"twisted.names.test.test_hosts",
"twisted.names.test.test_rfc1982",
"twisted.protocols.test.test_basic",
"twisted.protocols.test.test_tls",
"twisted.python.test.test_components",
"twisted.python.test.test_constants",
"twisted.python.test.test_deprecate",
"twisted.python.test.test_dist3",
"twisted.python.test.test_runtime",
"twisted.python.test.test_util",
"twisted.python.test.test_versions",
"twisted.test.test_abstract",
"twisted.test.test_compat",
"twisted.test.test_context",
"twisted.test.test_cooperator",
"twisted.test.test_defer",
"twisted.test.test_defgen",
"twisted.test.test_error",
"twisted.test.test_factories",
"twisted.test.test_failure",
"twisted.test.test_fdesc",
"twisted.test.test_internet",
"twisted.test.test_iosim",
"twisted.test.test_iutils",
"twisted.test.test_lockfile",
"twisted.test.test_log",
"twisted.test.test_loopback",
"twisted.test.test_monkey",
"twisted.test.test_paths",
"twisted.test.test_policies",
"twisted.test.test_randbytes",
"twisted.test.test_reflect",
"twisted.test.test_setup",
"twisted.test.test_ssl",
"twisted.test.test_sslverify",
"twisted.test.test_task",
"twisted.test.test_tcp",
"twisted.test.test_tcp_internals",
"twisted.test.test_threadable",
"twisted.test.test_threads",
"twisted.test.test_twisted",
"twisted.test.test_threadpool",
"twisted.test.test_udp",
"twisted.trial.test.test_assertions",
"twisted.trial.test.test_asyncassertions",
"twisted.trial.test.test_deferred",
"twisted.trial.test.test_pyunitcompat",
"twisted.trial.test.test_suppression",
"twisted.trial.test.test_testcase",
"twisted.trial.test.test_tests",
"twisted.trial.test.test_util",
"twisted.trial.test.test_warning",
# The downloadPage tests weren't ported:
"twisted.web.test.test_webclient",
"twisted.web.test.test_http",
"twisted.web.test.test_http_headers",
"twisted.web.test.test_resource",
"twisted.web.test.test_web",
]
almostModules = [
# Missing test coverage, see #6156:
"twisted.internet._sslverify",
# twisted.names.client semi-depends on twisted.names.root, but only on
# Windows really:
"twisted.names.root",
# Missing test coverage:
"twisted.protocols.loopback",
# Minimally used by setup3.py:
"twisted.python.dist",
# twisted.python.filepath depends on twisted.python.win32, but on Linux it
# only really needs to import:
"twisted.python.win32",
"twisted.test.reflect_helper_IE",
"twisted.test.reflect_helper_VE",
"twisted.test.reflect_helper_ZDE",
# Required by some of the ported trial tests:
"twisted.trial.reporter",
# Agent code and downloadPage aren't ported, test coverage isn't complete:
"twisted.web.client",
# twisted.web.resource depends on twisted.web.error, so it is sorta
# ported, but its tests are not yet ported, so it probably doesn't
# completely work.
"twisted.web.error",
# Required by twisted.web.server, no actual code here:
"twisted.web.iweb",
# Required by twisted.web.server for an error handling case:
"twisted.web.html",
# This module has a lot of missing test coverage. What tests it has pass,
# but it needs a lot more. It was ported only enough to make the client
# work.
"twisted.web.http",
# GzipEncoder and allowed methods functionality not ported, no doubt
# missing lots of test coverage:
"twisted.web.server",
]
modulesToInstall = modules + testModules + almostModules
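# Illustrative sketch only: a Python 3 setup script could consume the list
# above, e.g. by handing it to a setup() call; the keyword shown below is an
# assumption for illustration, not the actual setup3.py code.
#
#     from twisted.python.dist3 import modulesToInstall
#     setup(name="Twisted", py_modules=modulesToInstall)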
| gpl-2.0 |
pombreda/pygowave-server | pygowave_server/urls.py | 4 | 1194 |
#
# PyGoWave Server - The Python Google Wave Server
# Copyright 2009 Patrick Schneider <patrick.p2k.schneider@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.conf.urls.defaults import *
from pygowave_server.views import *
from django.conf import settings as django_settings
urlpatterns = patterns('',
(r'^$', index),
(r'^home/$', home),
(r'^settings/$', settings),
(r'^waves/$', wave_list),
(r'^waves/(?P<wave_id>\w+)/$', wave),
(r'^gadgets/$', all_gadgets),
(r'^gadgets/mine/$', my_gadgets),
(r'^gadgets/load/$', gadget_loader),
)
if 'rosetta' in django_settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'^rosetta/', include('rosetta.urls')),
)
| apache-2.0 |
saurabh6790/omnit-lib | webnotes/modules/import_file.py | 34 | 3331 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, os
from webnotes.modules import scrub, get_module_path, scrub_dt_dn
import webnotes.plugins
def import_files(module, dt=None, dn=None, plugin=None, force=False):
if type(module) is list:
out = []
for m in module:
out.append(import_file(m[0], m[1], m[2], plugin=plugin, force=force))
return out
else:
return import_file(module, dt, dn, plugin=plugin, force=force)
def import_file(module, dt, dn, plugin=None, force=False):
"""Sync a file from txt if modifed, return false if not updated"""
webnotes.flags.in_import = True
dt, dn = scrub_dt_dn(dt, dn)
if plugin:
path = webnotes.plugins.get_path(module, dt, dn, plugin, extn="txt")
else:
path = os.path.join(get_module_path(module),
os.path.join(dt, dn, dn + '.txt'))
ret = import_file_by_path(path, force)
webnotes.flags.in_import = False
return ret
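# Hedged usage sketch; the module/doctype names below are hypothetical and
# assume an initialized webnotes connection:
#
#     updated = import_file("core", "doctype", "user")
#     if updated:
#         print "synced user doctype from its txt file"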
def import_file_by_path(path, force=False):
if os.path.exists(path):
from webnotes.modules.utils import peval_doclist
with open(path, 'r') as f:
doclist = peval_doclist(f.read())
if doclist:
doc = doclist[0]
if not force:
# check if timestamps match
if doc['modified']==str(webnotes.conn.get_value(doc['doctype'], doc['name'], 'modified')):
return False
original_modified = doc["modified"]
import_doclist(doclist)
# since there is a new timestamp on the file, update timestamp in
webnotes.conn.sql("update `tab%s` set modified=%s where name=%s" % \
(doc['doctype'], '%s', '%s'),
(original_modified, doc['name']))
return True
else:
raise Exception, '%s missing' % path
ignore_values = {
"Report": ["disabled"],
}
ignore_doctypes = ["Page Role", "DocPerm"]
def import_doclist(doclist):
doctype = doclist[0]["doctype"]
name = doclist[0]["name"]
old_doc = None
doctypes = set([d["doctype"] for d in doclist])
ignore = list(doctypes.intersection(set(ignore_doctypes)))
if doctype in ignore_values:
if webnotes.conn.exists(doctype, name):
old_doc = webnotes.doc(doctype, name)
# delete old
webnotes.delete_doc(doctype, name, force=1, ignore_doctypes=ignore, for_reload=True)
# don't overwrite ignored docs
doclist1 = remove_ignored_docs_if_they_already_exist(doclist, ignore, name)
# update old values (if not to be overwritten)
if doctype in ignore_values and old_doc:
update_original_values(doclist1, doctype, old_doc)
# reload_new
new_bean = webnotes.bean(doclist1)
new_bean.ignore_children_type = ignore
new_bean.ignore_links = True
new_bean.ignore_validate = True
new_bean.ignore_permissions = True
new_bean.ignore_mandatory = True
if doctype=="DocType" and name in ["DocField", "DocType"]:
new_bean.ignore_fields = True
new_bean.insert()
def remove_ignored_docs_if_they_already_exist(doclist, ignore, name):
doclist1 = doclist
if ignore:
has_records = []
for d in ignore:
if webnotes.conn.get_value(d, {"parent":name}):
has_records.append(d)
if has_records:
doclist1 = filter(lambda d: d["doctype"] not in has_records, doclist)
return doclist1
def update_original_values(doclist, doctype, old_doc):
for key in ignore_values[doctype]:
doclist[0][key] = old_doc.fields[key]
| mit |
Hashish420/SCRYPTCOIN | contrib/bitrpc/bitrpc.py | 1 | 7834 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:7462")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:7462")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a scrypt address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a scrypt address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| mit |
ajgallegog/gem5_arm | configs/topologies/Crossbar.py | 47 | 2679 | # Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
from m5.params import *
from m5.objects import *
from BaseTopology import SimpleTopology
class Crossbar(SimpleTopology):
description='Crossbar'
def makeTopology(self, options, network, IntLink, ExtLink, Router):
# Create an individual router for each controller plus one more for
# the centralized crossbar. The large numbers of routers are needed
# because external links do not model outgoing bandwidth in the
# simple network, but internal links do.
routers = [Router(router_id=i) for i in range(len(self.nodes)+1)]
xbar = routers[len(self.nodes)] # the crossbar router is the last router created
network.routers = routers
ext_links = [ExtLink(link_id=i, ext_node=n, int_node=routers[i])
for (i, n) in enumerate(self.nodes)]
network.ext_links = ext_links
link_count = len(self.nodes)
int_links = [IntLink(link_id=(link_count+i),
node_a=routers[i], node_b=xbar)
for i in range(len(self.nodes))]
network.int_links = int_links
| bsd-3-clause |
TariqAHassan/ZeitSci | analysis/supplementary_fns.py | 1 | 4393 | import re
import time
from itertools import chain
def pprint(string, n=80):
"""
Pretty print a string, breaking it into chunks of length n.
"""
if not isinstance(string, str):
raise ValueError("Input must be a string!")
if len(string) < n:
print(string)
else:
# see http://stackoverflow.com/questions/9475241/split-python-string-every-nth-character
string_split = [string[i:i + n] for i in range(0, len(string), n)]
for l in string_split:
print(l.lstrip())
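# Example: pprint("word " * 40) prints the 200-character string as three
# chunks of at most 80 characters each.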
def lprint(input_list, tstep=None):
"""
:param input_list: a list (or dict) whose items are printed one per line.
:param tstep: optional pause, in seconds, between printed items.
:return: None; the items are printed to stdout.
"""
if isinstance(input_list, dict):
for k, v in input_list.items():
print(k, " ---> ", v)
if not isinstance(input_list, list) and not isinstance(input_list, dict):
print(input_list)
if len(input_list) == 0:
print("--- Empty List ---")
elif isinstance(input_list, list):
for l in range(len(input_list)):
if isinstance(tstep, int) or isinstance(tstep, float):
time.sleep(tstep)
print(str(l) + ":", input_list[l])
def cln(i, extent=1):
"""
String white space 'cleaner'.
:param i: the string to be cleaned.
:param extent: 1 --> all white space reduced to length 1; 2 --> removal of all white space.
:return:
"""
if isinstance(i, str) and i != "":
if extent == 1:
return re.sub(r"\s\s+", " ", i)
elif extent == 2:
return re.sub(r"\s+", "", i)
else:
return i
# else:
# return None
#
# if es:
# return to_return.lstrip().rstrip()
# else:
# return to_return
def insen_replace(input_str, term, replacement):
"""
Case-insensitive string replace;
replaces the term regardless of case.
see: http://stackoverflow.com/questions/919056/case-insensitive-replace
:param input_str: the string to perform the replacement in.
:param term: the term to be replaced.
:param replacement: the replacement string.
:return: input_str with term replaced and surrounding whitespace stripped.
"""
disp_term = re.compile(re.escape(term), re.IGNORECASE)
return disp_term.sub(replacement, input_str).lstrip().rstrip()
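# Example: insen_replace("Hello World", "world", "There") returns "Hello There".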
def partial_match(input_str, looking_for):
"""
:param input_str: the string to search within.
:param looking_for: the substring to look for (case-insensitive).
:return: True if looking_for occurs in input_str, False otherwise.
"""
if isinstance(input_str, str) and isinstance(looking_for, str):
return cln(looking_for.lower(), 1) in cln(input_str.lower(), 1)
else:
return False
def partial_list_match(input_str, allowed_list):
"""
:param input_str: the string to search within.
:param allowed_list: substrings to look for (case-insensitive).
:return: True if any item in allowed_list partially matches input_str.
"""
allowed_list = [cln(i.lower(), 1).lstrip().rstrip() for i in allowed_list]
for i in allowed_list:
if partial_match(input_str=input_str, looking_for=i):
return True
return False
def endpoints_str(input, start, end=","):
"""
:param input: the string to trim.
:param start: the expected prefix.
:param end: the trailing delimiter to drop (defaults to ",").
:return: the trimmed, whitespace-cleaned string, or None on failure.
"""
try:
return cln(start + input[len(start):-len(end)], 1).lstrip().rstrip()
except:
return None
def pandas_col_shift(data_frame, column, move_to=0):
"""
Please see Sachinmm's StackOverflow answer:
http://stackoverflow.com/questions/25122099/move-column-by-name-to-front-of-table-in-pandas
:param data_frame: a pandas dataframe
:param column: the column to be moved
:param move_to: position in df to move the column to; defaults to 0 (first)
:return:
"""
if not (0 <= move_to <= data_frame.shape[1]):
raise AttributeError("Invalid move_to value.")
if not isinstance(column, str):
raise ValueError("the column was not provided as a string.")
if column not in data_frame.columns.tolist():
raise AttributeError("the dataframe has no column: %s." % (column))
cols = data_frame.columns.tolist()
cols.insert(move_to, cols.pop(cols.index(column)))
return data_frame.reindex(columns=cols)
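# Illustrative usage (column names are made up):
#
#     import pandas as pd
#     df = pd.DataFrame({"a": [1], "b": [2], "c": [3]})
#     df = pandas_col_shift(df, "c", move_to=0)  # column order becomes c, a, b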
def items_present_test(input_list, clist):
"""
Check if any of the items in clist are in input_list
:param input_list: a list to look for them in
:param clist: things you're looking for
:return: True if any item in clist is present in input_list, else False.
"""
return any(x in input_list for x in clist)
def fast_flatten(input_list):
return list(chain.from_iterable(input_list))
def multi_replace(input_str, to_remove):
for tr in to_remove:
input_str = input_str.replace(tr, "")
return input_str
| gpl-3.0 |
bokeh/bokeh | bokeh/models/widgets/panels.py | 1 | 1085 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Compatibility module for panels.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from ..layouts import Panel, Tabs; Panel, Tabs
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Panel',
'Tabs',
)
| bsd-3-clause |
mdietrichc2c/OCB | addons/sale_crm/__openerp__.py | 52 | 2128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Opportunity to Quotation',
'version': '1.0',
'category': 'Hidden',
'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If several cases are open (a list), it generates one sales order per case.
Each case is then closed and linked to the generated sales order.
We suggest you install this module if you have installed both the sale and the crm
modules.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'images': ['images/crm_statistics_dashboard.jpeg', 'images/opportunity_to_quote.jpeg'],
'depends': ['sale', 'crm', 'web_kanban_gauge'],
'data': [
'wizard/crm_make_sale_view.xml',
'sale_crm_view.xml',
'security/sale_crm_security.xml',
'security/ir.model.access.csv',
],
'demo': [],
'test': ['test/sale_crm.yml'],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arbrandes/edx-platform | scripts/xsslint/xsslint/linters.py | 3 | 66920 | """
Linter classes containing logic for checking various filetypes.
"""
import ast
import io
import os
import re
import textwrap
from xsslint import visitors
from xsslint.reporting import ExpressionRuleViolation, FileResults, RuleViolation
from xsslint.rules import RuleSet
from xsslint.utils import Expression, ParseString, StringLines, is_skip_dir
from xsslint.django_linter import TransExpression, BlockTransExpression, HtmlInterpolateExpression
class BaseLinter:
"""
BaseLinter provides some helper functions that are used by multiple linters.
"""
LINE_COMMENT_DELIM = None
def _is_valid_directory(self, skip_dirs, directory):
"""
Determines if the provided directory is a directory that could contain
a file that needs to be linted.
Arguments:
skip_dirs: The directories to be skipped.
directory: The directory to be linted.
Returns:
True if this directory should be linted for violations and False
otherwise.
"""
if is_skip_dir(skip_dirs, directory):
return False
return True
def _load_file(self, file_full_path):
"""
Loads a file into a string.
Arguments:
file_full_path: The full path of the file to be loaded.
Returns:
A string containing the files contents.
"""
with open(file_full_path) as input_file:
file_contents = input_file.read()
return file_contents
def _load_and_check_file_is_safe(self, file_full_path, lint_function, results):
"""
Loads the Python file and checks if it is in violation.
Arguments:
file_full_path: The file to be loaded and linted.
lint_function: A function that will lint for violations. It must
take two arguments:
1) string contents of the file
2) results object
results: A FileResults to be used for this file
Returns:
The file results containing any violations.
"""
file_contents = self._load_file(file_full_path)
lint_function(file_contents, results)
return results
def _find_closing_char_index(
self, start_delim, open_char, close_char, template, start_index, num_open_chars=0, strings=None
):
"""
Finds the index of the closing char that matches the opening char.
For example, this could be used to find the end of a Mako expression,
where the open and close characters would be '{' and '}'.
Arguments:
start_delim: If provided (e.g. '${' for Mako expressions), the
closing character must be found before the next start_delim.
open_char: The opening character to be matched (e.g '{')
close_char: The closing character to be matched (e.g '}')
template: The template to be searched.
start_index: The start index of the last open char.
num_open_chars: The current number of open chars.
strings: A list of ParseStrings already parsed
Returns:
A dict containing the following, or None if unparseable:
close_char_index: The index of the closing character
strings: a list of ParseStrings
"""
strings = [] if strings is None else strings
# Find start index of an uncommented line.
start_index = self._uncommented_start_index(template, start_index)
# loop until we found something useful on an uncommented out line
while start_index is not None:
close_char_index = template.find(close_char, start_index)
if close_char_index < 0:
# If we can't find a close char, let's just quit.
return None
open_char_index = template.find(open_char, start_index, close_char_index)
parse_string = ParseString(template, start_index, close_char_index)
valid_index_list = [close_char_index]
if 0 <= open_char_index:
valid_index_list.append(open_char_index)
if parse_string.start_index is not None:
valid_index_list.append(parse_string.start_index)
min_valid_index = min(valid_index_list)
start_index = self._uncommented_start_index(template, min_valid_index)
if start_index == min_valid_index:
break
if start_index is None:
# No uncommented code to search.
return None
if parse_string.start_index == min_valid_index:
strings.append(parse_string)
if parse_string.end_index is None:
return None
else:
return self._find_closing_char_index(
start_delim, open_char, close_char, template, start_index=parse_string.end_index,
num_open_chars=num_open_chars, strings=strings
)
if open_char_index == min_valid_index:
if start_delim is not None:
# if we find another starting delim, consider this unparseable
start_delim_index = template.find(start_delim, start_index, close_char_index)
if 0 <= start_delim_index < open_char_index:
return None
return self._find_closing_char_index(
start_delim, open_char, close_char, template, start_index=open_char_index + 1,
num_open_chars=num_open_chars + 1, strings=strings
)
if num_open_chars == 0:
return {
'close_char_index': close_char_index,
'strings': strings,
}
else:
return self._find_closing_char_index(
start_delim, open_char, close_char, template, start_index=close_char_index + 1,
num_open_chars=num_open_chars - 1, strings=strings
)
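# Illustrative behaviour of _find_closing_char_index (values assumed, not
# taken from a real lint run): for template = "${call(arg)} tail",
# _find_closing_char_index("${", "{", "}", template, start_index=2) returns
# a dict whose 'close_char_index' is 11, the index of the '}' closing the
# expression, along with any ParseStrings found along the way.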
def _uncommented_start_index(self, template, start_index):
"""
Finds the first start_index that is on an uncommented line.
Arguments:
template: The template to be searched.
start_index: The start index of the last open char.
Returns:
If start_index is on an uncommented out line, returns start_index.
Otherwise, returns the start_index of the first line that is
uncommented, if there is one. Otherwise, returns None.
"""
if self.LINE_COMMENT_DELIM is not None:
line_start_index = StringLines(template).index_to_line_start_index(start_index)
uncommented_line_start_index_regex = re.compile(fr"^(?!\s*{self.LINE_COMMENT_DELIM})", re.MULTILINE)
# Finds the line start index of the first uncommented line, including the current line.
match = uncommented_line_start_index_regex.search(template, line_start_index)
if match is None:
# No uncommented lines.
return None
elif match.start() < start_index:
# Current line is uncommented, so return original start_index.
return start_index
else:
# Return start of first uncommented line.
return match.start()
else:
# No line comment delimeter, so this acts as a no-op.
return start_index
class UnderscoreTemplateLinter(BaseLinter):
"""
The linter for Underscore.js template files.
"""
ruleset = RuleSet(
underscore_not_escaped='underscore-not-escaped',
)
def __init__(self, skip_dirs=None):
"""
Init method.
"""
super().__init__()
self._skip_underscore_dirs = skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is an Underscore template file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential underscore file
Returns:
The file results containing any violations.
"""
full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(full_path)
if not self._is_valid_directory(self._skip_underscore_dirs, directory):
return results
if not file_name.lower().endswith('.underscore'):
return results
return self._load_and_check_file_is_safe(full_path, self.check_underscore_file_is_safe, results)
def check_underscore_file_is_safe(self, underscore_template, results):
"""
Checks for violations in an Underscore.js template.
Arguments:
underscore_template: The contents of the Underscore.js template.
results: A file results objects to which violations will be added.
"""
self._check_underscore_expressions(underscore_template, results)
results.prepare_results(underscore_template)
def _check_underscore_expressions(self, underscore_template, results):
"""
Searches for Underscore.js expressions that contain violations.
Arguments:
underscore_template: The contents of the Underscore.js template.
results: A list of results into which violations will be added.
"""
expressions = self._find_unescaped_expressions(underscore_template)
for expression in expressions:
if not self._is_safe_unescaped_expression(expression):
results.violations.append(ExpressionRuleViolation(
self.ruleset.underscore_not_escaped, expression
))
def _is_safe_unescaped_expression(self, expression):
"""
Determines whether an expression is safely escaped, even though it is
using the expression syntax that doesn't itself escape (i.e. <%= ).
In some cases it is ok to not use the Underscore.js template escape
(i.e. <%- ) because the escaping is happening inside the expression.
Safe examples::
<%= edx.HtmlUtils.ensureHtml(message) %>
<%= HtmlUtils.ensureHtml(message) %>
<%= _.escape(message) %>
Arguments:
expression: The Expression being checked.
Returns:
True if the Expression has been safely escaped, and False otherwise.
"""
if expression.expression_inner.startswith('edx.HtmlUtils.'):
return True
if expression.expression_inner.startswith('HtmlUtils.'):
return True
if expression.expression_inner.startswith('_.escape('):
return True
return False
def _find_unescaped_expressions(self, underscore_template):
"""
Returns a list of unsafe expressions.
At this time all expressions that are unescaped are considered unsafe.
Arguments:
underscore_template: The contents of the Underscore.js template.
Returns:
A list of Expressions.
"""
unescaped_expression_regex = re.compile("<%=.*?%>", re.DOTALL)
expressions = []
for match in unescaped_expression_regex.finditer(underscore_template):
expression = Expression(
match.start(), match.end(), template=underscore_template, start_delim="<%=", end_delim="%>"
)
expressions.append(expression)
return expressions
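# Minimal sketch of what the regex above captures (template text is made up):
# "<%= msg %>" is returned as an unescaped Expression, while "<%- msg %>" is
# not matched; _is_safe_unescaped_expression then decides whether a matched
# expression such as "<%= _.escape(msg) %>" is actually safe.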
class JavaScriptLinter(BaseLinter):
"""
The linter for JavaScript files.
"""
LINE_COMMENT_DELIM = "//"
ruleset = RuleSet(
javascript_jquery_append='javascript-jquery-append',
javascript_jquery_prepend='javascript-jquery-prepend',
javascript_jquery_insertion='javascript-jquery-insertion',
javascript_jquery_insert_into_target='javascript-jquery-insert-into-target',
javascript_jquery_html='javascript-jquery-html',
javascript_concat_html='javascript-concat-html',
javascript_escape='javascript-escape',
)
def __init__(self, underscore_linter, javascript_skip_dirs=None):
"""
Init method.
"""
super().__init__()
self.underscore_linter = underscore_linter
self.ruleset = self.ruleset + self.underscore_linter.ruleset
self._skip_javascript_dirs = javascript_skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a JavaScript file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential JavaScript file
Returns:
The file results containing any violations.
"""
file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(file_full_path)
if not results.is_file:
return results
if file_name.lower().endswith('.js') and not file_name.lower().endswith('.min.js'):
skip_dirs = self._skip_javascript_dirs
else:
return results
if not self._is_valid_directory(skip_dirs, directory):
return results
return self._load_and_check_file_is_safe(file_full_path, self.check_javascript_file_is_safe, results)
def check_javascript_file_is_safe(self, file_contents, results):
"""
Checks for violations in a JavaScript file.
Arguments:
file_contents: The contents of the JavaScript file.
results: A file results objects to which violations will be added.
"""
no_caller_check = None
no_argument_check = None
self._check_jquery_function(
file_contents, "append", self.ruleset.javascript_jquery_append, no_caller_check,
self._is_jquery_argument_safe, results
)
self._check_jquery_function(
file_contents, "prepend", self.ruleset.javascript_jquery_prepend, no_caller_check,
self._is_jquery_argument_safe, results
)
self._check_jquery_function(
file_contents, "unwrap|wrap|wrapAll|wrapInner|after|before|replaceAll|replaceWith",
self.ruleset.javascript_jquery_insertion, no_caller_check, self._is_jquery_argument_safe, results
)
self._check_jquery_function(
file_contents, "appendTo|prependTo|insertAfter|insertBefore",
self.ruleset.javascript_jquery_insert_into_target, self._is_jquery_insert_caller_safe, no_argument_check, results
)
self._check_jquery_function(
file_contents, "html", self.ruleset.javascript_jquery_html, no_caller_check,
self._is_jquery_html_argument_safe, results
)
self._check_javascript_escape(file_contents, results)
self._check_concat_with_html(file_contents, self.ruleset.javascript_concat_html, results)
self.underscore_linter.check_underscore_file_is_safe(file_contents, results)
results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM)
def _get_expression_for_function(self, file_contents, function_start_match):
"""
Returns an expression that matches the function call opened with
function_start_match.
Arguments:
file_contents: The contents of the JavaScript file.
function_start_match: A regex match representing the start of the function
call (e.g. ".escape(").
Returns:
An Expression that best matches the function.
"""
start_index = function_start_match.start()
inner_start_index = function_start_match.end()
result = self._find_closing_char_index(
None, "(", ")", file_contents, start_index=inner_start_index
)
if result is not None:
end_index = result['close_char_index'] + 1
expression = Expression(
start_index, end_index, template=file_contents, start_delim=function_start_match.group(), end_delim=")"
)
else:
expression = Expression(start_index)
return expression
def _check_javascript_escape(self, file_contents, results):
"""
Checks that escape() is not used. escape() is not recommended.
ref. https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/escape
Arguments:
file_contents: The contents of the JavaScript file.
results: A file results objects to which violations will be added.
"""
# Regex to match uses of escape() or window.escape().
regex = re.compile(r"(?:^|(?<=window\.)|(?<![\w.$]))escape\(")
for function_match in regex.finditer(file_contents):
expression = self._get_expression_for_function(file_contents, function_match)
results.violations.append(ExpressionRuleViolation(self.ruleset.javascript_escape, expression))
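# Per the lookbehinds above, "escape(foo)" and "window.escape(foo)" are both
# flagged, while calls such as "myescape(foo)" or "_.escape(foo)" are not
# (hypothetical call sites, for illustration only).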
def _check_jquery_function(self, file_contents, function_names, rule, is_caller_safe, is_argument_safe, results):
"""
Checks that the JQuery function_names (e.g. append(), prepend()) calls
are safe.
Arguments:
file_contents: The contents of the JavaScript file.
function_names: A pipe delimited list of names of the functions
(e.g. "wrap|after|before").
rule: The name of the rule to use for validation errors (e.g.
self.ruleset.javascript_jquery_append).
is_caller_safe: A function to test if caller of the JQuery function
is safe.
is_argument_safe: A function to test if the argument passed to the
JQuery function is safe.
results: A file results objects to which violations will be added.
"""
# Ignores calls starting with "HtmlUtils.", because those are safe
regex = re.compile(fr"(?<!HtmlUtils).(?:{function_names})\(")
for function_match in regex.finditer(file_contents):
is_violation = True
expression = self._get_expression_for_function(file_contents, function_match)
if expression.end_index is not None:
start_index = expression.start_index
inner_start_index = function_match.end()
close_paren_index = expression.end_index - 1
function_argument = file_contents[inner_start_index:close_paren_index].strip()
if is_argument_safe is not None and is_caller_safe is None:
is_violation = is_argument_safe(function_argument) is False
elif is_caller_safe is not None and is_argument_safe is None:
line_start_index = StringLines(file_contents).index_to_line_start_index(start_index)
caller_line_start = file_contents[line_start_index:start_index]
is_violation = is_caller_safe(caller_line_start) is False
else:
raise ValueError("Must supply either is_argument_safe, or is_caller_safe, but not both.")
if is_violation:
results.violations.append(ExpressionRuleViolation(rule, expression))
def _is_jquery_argument_safe_html_utils_call(self, argument):
"""
Checks that the argument sent to a jQuery DOM insertion function is a
safe call to HtmlUtils.
A safe argument is of the form:
- HtmlUtils.xxx(anything).toString()
- edx.HtmlUtils.xxx(anything).toString()
Arguments:
argument: The argument sent to the jQuery function (e.g.
append(argument)).
Returns:
True if the argument is safe, and False otherwise.
"""
# match on HtmlUtils.xxx().toString() or edx.HtmlUtils
match = re.search(r"(?:edx\.)?HtmlUtils\.[a-zA-Z0-9]+\(.*\)\.toString\(\)", argument)
return match is not None and match.group() == argument
def _is_jquery_argument_safe(self, argument):
"""
Check the argument sent to a jQuery DOM insertion function (e.g.
append()) to check if it is safe.
Safe arguments include:
- the argument can end with ".el", ".$el" (with no concatenation)
- the argument can be a single variable ending in "El" or starting with
"$". For example, "testEl" or "$test".
- the argument can be a single string literal with no HTML tags
- the argument can be a call to $() with the first argument a string
literal with a single HTML tag. For example, ".append($('<br/>'))"
or ".append($('<br/>'))".
- the argument can be a call to HtmlUtils.xxx(html).toString()
Arguments:
argument: The argument sent to the jQuery function (e.g.
append(argument)).
Returns:
True if the argument is safe, and False otherwise.
"""
match_variable_name = re.search("[_$a-zA-Z]+[_$a-zA-Z0-9]*", argument)
if match_variable_name is not None and match_variable_name.group() == argument:
if argument.endswith('El') or argument.startswith('$'):
return True
elif argument.startswith('"') or argument.startswith("'"):
# a single literal string with no HTML is ok
# 1. it gets rid of false negatives for non-jquery calls (e.g. graph.append("g"))
# 2. JQuery will treat this as a plain text string and will escape any & if needed.
string = ParseString(argument, 0, len(argument))
if string.string == argument and "<" not in argument:
return True
elif argument.startswith('$('):
# match on JQuery calls with single string and single HTML tag
# Examples:
# $("<span>")
# $("<div/>")
# $("<div/>", {...})
match = re.search(r"""\$\(\s*['"]<[a-zA-Z0-9]+\s*[/]?>['"]\s*[,)]""", argument)
if match is not None:
return True
elif self._is_jquery_argument_safe_html_utils_call(argument):
return True
# check rules that shouldn't use concatenation
elif "+" not in argument:
if argument.endswith('.el') or argument.endswith('.$el'):
return True
return False
def _is_jquery_html_argument_safe(self, argument):
"""
Check the argument sent to the jQuery html() function to check if it is
safe.
Safe arguments to html():
- no argument (i.e. getter rather than setter)
- empty string is safe
- the argument can be a call to HtmlUtils.xxx(html).toString()
Arguments:
argument: The argument sent to html() in code (i.e. html(argument)).
Returns:
True if the argument is safe, and False otherwise.
"""
if argument == "" or argument == "''" or argument == '""':
return True
elif self._is_jquery_argument_safe_html_utils_call(argument):
return True
return False
def _is_jquery_insert_caller_safe(self, caller_line_start):
"""
Check that the caller of a jQuery DOM insertion function that takes a
target is safe (e.g. thisEl.appendTo(target)).
If original line was::
draggableObj.iconEl.appendTo(draggableObj.containerEl);
Parameter caller_line_start would be:
draggableObj.iconEl
Safe callers include:
- the caller can be ".el", ".$el"
- the caller can be a single variable ending in "El" or starting with
"$". For example, "testEl" or "$test".
Arguments:
caller_line_start: The line leading up to the jQuery function call.
Returns:
True if the caller is safe, and False otherwise.
"""
# matches end of line for caller, which can't itself be a function
caller_match = re.search(r"(?:\s*|[.])([_$a-zA-Z]+[_$a-zA-Z0-9]*)$", caller_line_start)
if caller_match is None:
return False
caller = caller_match.group(1)
if caller is None:
return False
elif caller.endswith('El') or caller.startswith('$'):
return True
elif caller == 'el' or caller == 'parentNode':
return True
return False
def _check_concat_with_html(self, file_contents, rule, results):
"""
Checks that strings with HTML are not concatenated
Arguments:
file_contents: The contents of the JavaScript file.
rule: The rule that was violated if this fails.
results: A file results objects to which violations will be added.
"""
lines = StringLines(file_contents)
last_expression = None
# Match quoted strings that starts with '<' or ends with '>'.
regex_string_with_html = r"""
{quote} # Opening quote.
(
\s*< # Starts with '<' (ignoring spaces)
([^{quote}]|[\\]{quote})* # followed by anything but a closing quote.
| # Or,
([^{quote}]|[\\]{quote})* # Anything but a closing quote
>\s* # ending with '>' (ignoring spaces)
)
{quote} # Closing quote.
"""
# Match single or double quote.
regex_string_with_html = "({}|{})".format(
regex_string_with_html.format(quote="'"),
regex_string_with_html.format(quote='"'),
)
# Match quoted HTML strings next to a '+'.
regex_concat_with_html = re.compile(
r"(\+\s*{string_with_html}|{string_with_html}\s*\+)".format(
string_with_html=regex_string_with_html,
),
re.VERBOSE
)
for match in regex_concat_with_html.finditer(file_contents):
found_new_violation = False
if last_expression is not None:
last_line = lines.index_to_line_number(last_expression.start_index)
# check if violation should be expanded to more of the same line
if last_line == lines.index_to_line_number(match.start()):
last_expression = Expression(
last_expression.start_index, match.end(), template=file_contents
)
else:
results.violations.append(ExpressionRuleViolation(
rule, last_expression
))
found_new_violation = True
else:
found_new_violation = True
if found_new_violation:
last_expression = Expression(
match.start(), match.end(), template=file_contents
)
# add final expression
if last_expression is not None:
results.violations.append(ExpressionRuleViolation(
rule, last_expression
))
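# For illustration (assumed snippets): both "'<b>' + name" and
# "name + '</b>'" place a quoted HTML fragment next to '+', so each would be
# reported by _check_concat_with_html above.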
class PythonLinter(BaseLinter):
"""
The linter for Python files.
The current implementation of the linter does naive Python parsing. It does
not use the parser. One known issue is that parsing errors found inside a
docstring need to be disabled, rather than being automatically skipped.
Skipping docstrings is an enhancement that could be added.
"""
LINE_COMMENT_DELIM = "#"
ruleset = RuleSet(
python_parse_error='python-parse-error',
python_custom_escape='python-custom-escape',
# The Visitor classes are python-specific and should be moved into the PythonLinter once they have
# been decoupled from the MakoTemplateLinter.
) + visitors.ruleset
def __init__(self, skip_dirs=None):
"""
Init method.
"""
super().__init__()
self._skip_python_dirs = skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a Python file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential Python file
Returns:
The file results containing any violations.
"""
file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(file_full_path)
if not results.is_file:
return results
if file_name.lower().endswith('.py') is False:
return results
# skip tests.py files
# TODO: Add configuration for files and paths
if file_name.lower().endswith('tests.py'):
return results
# skip this linter code (i.e. xss_linter.py)
if file_name == os.path.basename(__file__):
return results
if not self._is_valid_directory(self._skip_python_dirs, directory):
return results
return self._load_and_check_file_is_safe(file_full_path, self.check_python_file_is_safe, results)
def check_python_file_is_safe(self, file_contents, results):
"""
Checks for violations in a Python file.
Arguments:
file_contents: The contents of the Python file.
results: A file results objects to which violations will be added.
"""
root_node = self.parse_python_code(file_contents, results)
self.check_python_code_is_safe(file_contents, root_node, results)
# Check rules specific to .py files only
# Note that in template files, the scope is different, so you can make
# different assumptions.
if root_node is not None:
# check format() rules that can be run on outer-most format() calls
visitor = visitors.OuterFormatVisitor(file_contents, results)
visitor.visit(root_node)
results.prepare_results(file_contents, line_comment_delim=self.LINE_COMMENT_DELIM)
def check_python_code_is_safe(self, python_code, root_node, results):
"""
Checks for violations in Python code snippet. This can also be used for
Python that appears in files other than .py files, like in templates.
Arguments:
python_code: The contents of the Python code.
root_node: The root node of the Python code parsed by AST.
results: A file results objects to which violations will be added.
"""
if root_node is not None:
# check illegal concatenation and interpolation
visitor = visitors.AllNodeVisitor(python_code, results)
visitor.visit(root_node)
# check rules parse with regex
self._check_custom_escape(python_code, results)
def parse_python_code(self, python_code, results):
"""
Parses Python code.
Arguments:
python_code: The Python code to be parsed.
Returns:
The root node that was parsed, or None for SyntaxError.
"""
python_code = self._strip_file_encoding(python_code)
try:
return ast.parse(python_code)
except SyntaxError as e:
if e.offset is None:
expression = Expression(0)
else:
lines = StringLines(python_code)
line_start_index = lines.line_number_to_start_index(e.lineno)
expression = Expression(line_start_index + e.offset)
results.violations.append(ExpressionRuleViolation(
self.ruleset.python_parse_error, expression
))
return None
def _strip_file_encoding(self, file_contents):
"""
Removes file encoding from file_contents because the file was already
read into Unicode, and the AST parser complains.
Arguments:
file_contents: The Python file contents.
Returns:
The Python file contents with the encoding stripped.
"""
# PEP-263 Provides Regex for Declaring Encoding
# Example: -*- coding: <encoding name> -*-
# This is only allowed on the first two lines, and it must be stripped
# before parsing, because we have already read into Unicode and the
# AST parser complains.
encoding_regex = re.compile(r"^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
encoding_match = encoding_regex.search(file_contents)
# If encoding comment not found on first line, search second line.
if encoding_match is None:
lines = StringLines(file_contents)
if lines.line_count() >= 2:
encoding_match = encoding_regex.search(lines.line_number_to_line(2))
# If encoding was found, strip it
if encoding_match is not None:
file_contents = file_contents.replace(encoding_match.group(), '#', 1)
return file_contents
def _check_custom_escape(self, file_contents, results):
"""
Checks for custom escaping calls, rather than using a standard escaping
method.
Arguments:
file_contents: The contents of the Python file
results: A list of results into which violations will be added.
"""
for match in re.finditer("(<.*&lt;|&lt;.*<)", file_contents):
expression = Expression(match.start(), match.end())
results.violations.append(ExpressionRuleViolation(
self.ruleset.python_custom_escape, expression
))
class MakoTemplateLinter(BaseLinter):
"""
The linter for Mako template files.
"""
LINE_COMMENT_DELIM = "##"
ruleset = RuleSet(
mako_missing_default='mako-missing-default',
mako_multiple_page_tags='mako-multiple-page-tags',
mako_unparseable_expression='mako-unparseable-expression',
mako_unwanted_html_filter='mako-unwanted-html-filter',
mako_invalid_html_filter='mako-invalid-html-filter',
mako_invalid_js_filter='mako-invalid-js-filter',
mako_js_missing_quotes='mako-js-missing-quotes',
mako_js_html_string='mako-js-html-string',
mako_html_entities='mako-html-entities',
mako_unknown_context='mako-unknown-context',
# NOTE The MakoTemplateLinter directly checks for python_wrap_html and directly
# instantiates Visitor instances to check for python issues. This logic should
# be moved into the PythonLinter. The MakoTemplateLinter should only check for
# Mako-specific issues.
python_wrap_html='python-wrap-html',
) + visitors.ruleset
def __init__(self, javascript_linter, python_linter, skip_dirs=None):
"""
Init method.
"""
super().__init__()
self.javascript_linter = javascript_linter
self.python_linter = python_linter
self.ruleset = self.ruleset + self.javascript_linter.ruleset + self.python_linter.ruleset
self._skip_mako_dirs = skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a Mako template file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential Mako file
Returns:
The file results containing any violations.
"""
mako_file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(mako_file_full_path)
if not results.is_file:
return results
if not self._is_valid_directory(directory):
return results
# TODO: When safe-by-default is turned on at the platform level, will we:
# 1. Turn it on for .html only, or
# 2. Turn it on for all files, and have different rulesets that have
# different rules of .xml, .html, .js, .txt Mako templates (e.g. use
# the n filter to turn off h for some of these)?
# For now, we only check .html and .xml files
if not (file_name.lower().endswith('.html') or file_name.lower().endswith('.xml')):
return results
return self._load_and_check_file_is_safe(mako_file_full_path, self._check_mako_file_is_safe, results)
def _is_valid_directory(self, directory):
"""
Determines if the provided directory is a directory that could contain
Mako template files that need to be linted.
Arguments:
directory: The directory to be linted.
Returns:
True if this directory should be linted for Mako template violations
and False otherwise.
"""
if is_skip_dir(self._skip_mako_dirs, directory):
return False
# TODO: This is an imperfect guess concerning the Mako template
# directories. This needs to be reviewed before turning on safe by
# default at the platform level.
if ('/templates/' in directory) or directory.endswith('/templates'):
return True
return False
def _check_mako_file_is_safe(self, mako_template, results):
"""
Checks for violations in a Mako template.
Arguments:
mako_template: The contents of the Mako template.
results: A file results objects to which violations will be added.
"""
if self._is_django_template(mako_template):
return
has_page_default = self._has_page_default(mako_template, results)
self._check_mako_expressions(mako_template, has_page_default, results)
self._check_mako_python_blocks(mako_template, has_page_default, results)
results.prepare_results(mako_template, line_comment_delim=self.LINE_COMMENT_DELIM)
def _is_django_template(self, mako_template):
"""
Determines if the template is actually a Django template.
Arguments:
mako_template: The template code.
Returns:
True if this is really a Django template, and False otherwise.
"""
if re.search('({%.*%})|({{.*}})|({#.*#})', mako_template) is not None:
return True
return False
def _get_page_tag_count(self, mako_template):
"""
Determines the number of page expressions in the Mako template. Ignores
page expressions that are commented out.
Arguments:
mako_template: The contents of the Mako template.
Returns:
The number of page expressions
"""
count = len(re.findall('<%page ', mako_template, re.IGNORECASE))
count_commented = len(re.findall(r'##\s+<%page ', mako_template, re.IGNORECASE))
return max(0, count - count_commented)
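# Example: a template containing
#     <%page expression_filter="h"/>
#     ## <%page args="old"/>
# yields count=2 and count_commented=1, so one active page tag is reported.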
def _has_page_default(self, mako_template, results):
"""
Checks if the Mako template contains the page expression marking it as
safe by default.
Arguments:
mako_template: The contents of the Mako template.
results: A list of results into which violations will be added.
Side effect:
Adds violations regarding page default if necessary
Returns:
True if the template has the page default, and False otherwise.
"""
page_tag_count = self._get_page_tag_count(mako_template)
# check if there are too many page expressions
if page_tag_count >= 2:
results.violations.append(RuleViolation(self.ruleset.mako_multiple_page_tags))
return False
# make sure there is exactly 1 page expression, excluding commented out
# page expressions, before proceeding
elif page_tag_count != 1:
results.violations.append(RuleViolation(self.ruleset.mako_missing_default))
return False
# check that safe by default (h filter) is turned on
page_h_filter_regex = re.compile('<%page[^>]*expression_filter=(?:"h"|\'h\')[^>]*/>')
page_match = page_h_filter_regex.search(mako_template)
if not page_match:
results.violations.append(RuleViolation(self.ruleset.mako_missing_default))
return page_match is not None
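# The default being checked for is a page tag such as
#     <%page expression_filter="h"/>
# which turns on HTML escaping for every ${...} expression in the template.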
def _check_mako_expressions(self, mako_template, has_page_default, results):
"""
Searches for Mako expressions and then checks if they contain
violations, including checking JavaScript contexts for JavaScript
violations.
Arguments:
mako_template: The contents of the Mako template.
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
"""
expressions = self._find_mako_expressions(mako_template)
contexts = self._get_contexts(mako_template)
self._check_javascript_contexts(mako_template, contexts, results)
for expression in expressions:
if expression.end_index is None:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_unparseable_expression, expression
))
continue
context = self._get_context(contexts, expression.start_index)
self._check_expression_and_filters(mako_template, expression, context, has_page_default, results)
def _check_javascript_contexts(self, mako_template, contexts, results):
"""
Lint the JavaScript contexts for JavaScript violations inside a Mako
template.
Arguments:
mako_template: The contents of the Mako template.
contexts: A list of context dicts with 'type' and 'index'.
results: A list of results into which violations will be added.
Side effect:
Adds JavaScript violations to results.
"""
javascript_start_index = None
for context in contexts:
if context['type'] == 'javascript':
if javascript_start_index is None:
javascript_start_index = context['index']
else:
if javascript_start_index is not None:
javascript_end_index = context['index']
javascript_code = mako_template[javascript_start_index:javascript_end_index]
self._check_javascript_context(javascript_code, javascript_start_index, results)
javascript_start_index = None
if javascript_start_index is not None:
javascript_code = mako_template[javascript_start_index:]
self._check_javascript_context(javascript_code, javascript_start_index, results)
def _check_javascript_context(self, javascript_code, start_offset, results):
"""
Lint a single JavaScript context for JavaScript violations inside a Mako
template.
Arguments:
javascript_code: The template contents of the JavaScript context.
start_offset: The offset of the JavaScript context inside the
original Mako template.
results: A list of results into which violations will be added.
Side effect:
Adds JavaScript violations to results.
"""
javascript_results = FileResults("")
self.javascript_linter.check_javascript_file_is_safe(javascript_code, javascript_results)
self._shift_and_add_violations(javascript_results, start_offset, results)
def _check_mako_python_blocks(self, mako_template, has_page_default, results):
"""
Searches for Mako python blocks and checks if they contain
violations.
Arguments:
mako_template: The contents of the Mako template.
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
"""
# Finds Python blocks such as <% ... %>, skipping other Mako start tags
# such as <%def> and <%page>.
python_block_regex = re.compile(r'<%\s(?P<code>.*?)%>', re.DOTALL)
for python_block_match in python_block_regex.finditer(mako_template):
self._check_expression_python(
python_code=python_block_match.group('code'),
start_offset=(python_block_match.start() + len('<% ')),
has_page_default=has_page_default,
results=results
)
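# Illustration: the regex above matches Python blocks like
#     <% values = [v.strip() for v in raw_values] %>
# but not tags such as <%page .../> or <%def name="x()">, since those have
# no whitespace immediately after "<%".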
def _check_expression_python(self, python_code, start_offset, has_page_default, results):
"""
Lint the Python inside a single Python expression in a Mako template.
Arguments:
python_code: The Python contents of an expression.
start_offset: The offset of the Python content inside the original
Mako template.
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
Side effect:
Adds Python violations to results.
"""
python_results = FileResults("")
# Dedent expression internals so it is parseable.
# Note that the final columns reported could be off somewhat.
adjusted_python_code = textwrap.dedent(python_code)
first_letter_match = re.search(r'\w', python_code)
adjusted_first_letter_match = re.search(r'\w', adjusted_python_code)
if first_letter_match is not None and adjusted_first_letter_match is not None:
start_offset += (first_letter_match.start() - adjusted_first_letter_match.start())
python_code = adjusted_python_code
root_node = self.python_linter.parse_python_code(python_code, python_results)
self.python_linter.check_python_code_is_safe(python_code, root_node, python_results)
# Check mako expression specific Python rules.
if root_node is not None:
visitor = visitors.HtmlStringVisitor(python_code, python_results, True)
visitor.visit(root_node)
for unsafe_html_string_node in visitor.unsafe_html_string_nodes:
python_results.violations.append(ExpressionRuleViolation(
self.ruleset.python_wrap_html, visitor.node_to_expression(unsafe_html_string_node)
))
if has_page_default:
for over_escaped_entity_string_node in visitor.over_escaped_entity_string_nodes:
python_results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_html_entities, visitor.node_to_expression(over_escaped_entity_string_node)
))
python_results.prepare_results(python_code, line_comment_delim=self.LINE_COMMENT_DELIM)
self._shift_and_add_violations(python_results, start_offset, results)
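# A minimal sketch of the offset adjustment above (assumed behaviour):
#     >>> import re, textwrap
#     >>> code = "    x = 1"
#     >>> adjusted = textwrap.dedent(code)          # "x = 1"
#     >>> re.search(r'\w', code).start() - re.search(r'\w', adjusted).start()
#     4
# Violations found in the dedented code are shifted by this difference so
# they map back to the right position in the original Mako template.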
def _shift_and_add_violations(self, other_linter_results, start_offset, results):
"""
Adds results from a different linter to the Mako results, after shifting
the offset into the original Mako template.
Arguments:
other_linter_results: Results from another linter.
start_offset: The offset of the linted code, a part of the template,
inside the original Mako template.
results: A list of results into which violations will be added.
Side effect:
Adds violations to results.
"""
# translate the violations into the proper location within the original
# Mako template
for violation in other_linter_results.violations:
expression = violation.expression
expression.start_index += start_offset
if expression.end_index is not None:
expression.end_index += start_offset
results.violations.append(ExpressionRuleViolation(violation.rule, expression))
def _check_expression_and_filters(self, mako_template, expression, context, has_page_default, results):
"""
Checks that the filters used in the given Mako expression are valid
for the given context. Adds violation to results if there is a problem.
Arguments:
mako_template: The contents of the Mako template.
expression: A Mako Expression.
context: The context of the page in which the expression was found
(e.g. javascript, html).
has_page_default: True if the page is marked as default, False
otherwise.
results: A list of results into which violations will be added.
"""
if context == 'unknown':
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_unknown_context, expression
))
return
# Example: finds "| n, h}" when given "${x | n, h}"
filters_regex = re.compile(r'\|([.,\w\s]*)\}')
filters_match = filters_regex.search(expression.expression)
# Check Python code inside expression.
if filters_match is None:
python_code = expression.expression[2:-1]
else:
python_code = expression.expression[2:filters_match.start()]
self._check_expression_python(python_code, expression.start_index + 2, has_page_default, results)
# Check filters.
if filters_match is None:
if context == 'javascript':
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_invalid_js_filter, expression
))
return
filters = filters_match.group(1).replace(" ", "").split(",")
if filters == ['n', 'decode.utf8']:
# {x | n, decode.utf8} is valid in any context
pass
elif context == 'html':
if filters == ['h']:
if has_page_default:
# only report the redundant h filter when the page default is set;
# removing h from a template without the default would make it less safe
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_unwanted_html_filter, expression
))
elif filters == ['n', 'strip_all_tags_but_br']:
# {x | n, strip_all_tags_but_br} is valid in html context
pass
else:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_invalid_html_filter, expression
))
elif context == 'javascript':
self._check_js_expression_not_with_html(mako_template, expression, results)
if filters == ['n', 'dump_js_escaped_json']:
# {x | n, dump_js_escaped_json} is valid
pass
elif filters == ['n', 'js_escaped_string']:
# {x | n, js_escaped_string} is valid, if surrounded by quotes
self._check_js_string_expression_in_quotes(mako_template, expression, results)
else:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_invalid_js_filter, expression
))
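# Examples of how the filter rules above apply (illustrative):
#     ${x | h}                        -> mako-unwanted-html-filter in html
#                                        context (h is already the default)
#     ${x | n, dump_js_escaped_json}  -> valid in a javascript context
#     ${x | n, js_escaped_string}     -> valid in javascript when quoted,
#                                        e.g. var s = '${x | n, js_escaped_string}';
#     ${x | u}                        -> mako-invalid-html-filter in html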
def _check_js_string_expression_in_quotes(self, mako_template, expression, results):
"""
Checks that a Mako expression using js_escaped_string is surrounded by
quotes.
Arguments:
mako_template: The contents of the Mako template.
expression: A Mako Expression.
results: A list of results into which violations will be added.
"""
parse_string = self._find_string_wrapping_expression(mako_template, expression)
if parse_string is None:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_js_missing_quotes, expression
))
def _check_js_expression_not_with_html(self, mako_template, expression, results):
"""
Checks that a Mako expression in a JavaScript context does not appear in
a string that also contains HTML.
Arguments:
mako_template: The contents of the Mako template.
expression: A Mako Expression.
results: A list of results into which violations will be added.
"""
parse_string = self._find_string_wrapping_expression(mako_template, expression)
if parse_string is not None and re.search('[<>]', parse_string.string) is not None:
results.violations.append(ExpressionRuleViolation(
self.ruleset.mako_js_html_string, expression
))
def _find_string_wrapping_expression(self, mako_template, expression):
"""
Finds the string wrapping the Mako expression if there is one.
Arguments:
mako_template: The contents of the Mako template.
expression: A Mako Expression.
Returns:
ParseString representing a scrubbed version of the wrapped string,
where the Mako expression was replaced with "${...}", if a wrapped
string was found. Otherwise, returns None if none found.
"""
lines = StringLines(mako_template)
start_index = lines.index_to_line_start_index(expression.start_index)
if expression.end_index is not None:
end_index = lines.index_to_line_end_index(expression.end_index)
else:
return None
# scrub out the actual expression so any code inside the expression
# doesn't interfere with rules applied to the surrounding code (i.e.
# checking JavaScript).
scrubbed_lines = "".join((
mako_template[start_index:expression.start_index],
"${...}",
mako_template[expression.end_index:end_index]
))
adjusted_start_index = expression.start_index - start_index
start_index = 0
while True:
parse_string = ParseString(scrubbed_lines, start_index, len(scrubbed_lines))
# check for validly parsed string
if (parse_string.start_index is not None and parse_string.end_index is not None) \
and (0 <= parse_string.start_index < parse_string.end_index):
# check if expression is contained in the given string
if parse_string.start_index < adjusted_start_index < parse_string.end_index:
return parse_string
else:
# move to check next string
start_index = parse_string.end_index
else:
break
return None
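# Sketch: for a JavaScript line such as
#     var s = 'Hello ${name}!';
# the expression is scrubbed to
#     var s = 'Hello ${...}!';
# and ParseString then locates the single-quoted string wrapping "${...}",
# which is what the quoting checks above inspect.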
def _get_contexts(self, mako_template):
"""
Returns a data structure that represents the indices at which the
template changes from HTML context to JavaScript and back.
Return:
A list of dicts where each dict contains:
- index: the index of the context.
- type: the context type (e.g. 'html' or 'javascript').
"""
contexts_re = re.compile(
r"""
<script.*?(?<!/)> | # script tag start
</script> | # script tag end
<%static:require_module(_async)?.*?(?<!/)> | # require js script tag start (optionally the _async version)
</%static:require_module(_async)?> | # require js script tag end (optionally the _async version)
<%static:webpack.*?(?<!/)> | # webpack script tag start
</%static:webpack> | # webpack script tag end
<%static:studiofrontend.*?(?<!/)> | # studiofrontend script tag start
</%static:studiofrontend> | # studiofrontend script tag end
<%block[ ]*name=['"]requirejs['"]\w*(?<!/)> | # require js tag start
</%block> # require js tag end
""",
re.VERBOSE | re.IGNORECASE
)
media_type_re = re.compile(r"""type=['"].*?['"]""", re.IGNORECASE)
contexts = [{'index': 0, 'type': 'html'}]
javascript_types = [
'text/javascript', 'text/ecmascript', 'application/ecmascript', 'application/javascript',
'text/x-mathjax-config', 'json/xblock-args', 'application/json',
]
html_types = ['text/template']
for context in contexts_re.finditer(mako_template):
match_string = context.group().lower()
if match_string.startswith("<script"):
match_type = media_type_re.search(match_string)
context_type = 'javascript'
if match_type is not None:
# get media type (e.g. get text/javascript from
# type="text/javascript")
match_type = match_type.group()[6:-1].lower()
if match_type in html_types:
context_type = 'html'
elif match_type not in javascript_types:
context_type = 'unknown'
contexts.append({'index': context.end(), 'type': context_type})
elif match_string.startswith("</"):
contexts.append({'index': context.start(), 'type': 'html'})
else:
contexts.append({'index': context.end(), 'type': 'javascript'})
return contexts
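# For instance (indices illustrative), the template
#     <p>hi</p><script type="text/javascript">var x = 1;</script>
# yields contexts roughly like
#     [{'index': 0, 'type': 'html'},
#      {'index': 41, 'type': 'javascript'},   # just after <script ...>
#      {'index': 52, 'type': 'html'}]         # at </script>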
def _get_context(self, contexts, index):
"""
Gets the context (e.g. javascript, html) of the template at the given
index.
Arguments:
contexts: A list of dicts where each dict contains the 'index' of the context
and the context 'type' (e.g. 'html' or 'javascript').
index: The index for which we want the context.
Returns:
The context (e.g. javascript or html) for the given index.
"""
current_context = contexts[0]['type']
for context in contexts:
if context['index'] <= index:
current_context = context['type']
else:
break
return current_context
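# e.g. _get_context([{'index': 0, 'type': 'html'},
#                    {'index': 40, 'type': 'javascript'}], 50)
# returns 'javascript', since index 40 is the last boundary at or before 50.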
def _find_mako_expressions(self, mako_template):
"""
Finds all the Mako expressions in a Mako template and creates a list
of dicts for each expression.
Arguments:
mako_template: The content of the Mako template.
Returns:
A list of Expressions.
"""
start_delim = '${'
start_index = 0
expressions = []
while True:
start_index = mako_template.find(start_delim, start_index)
if start_index < 0:
break
# If start of mako expression is commented out, skip it.
uncommented_start_index = self._uncommented_start_index(mako_template, start_index)
if uncommented_start_index != start_index:
start_index = uncommented_start_index
continue
result = self._find_closing_char_index(
start_delim, '{', '}', mako_template, start_index=start_index + len(start_delim)
)
if result is None:
expression = Expression(start_index)
# for parsing error, restart search right after the start of the
# current expression
start_index = start_index + len(start_delim)
else:
close_char_index = result['close_char_index']
expression = mako_template[start_index:close_char_index + 1]
expression = Expression(
start_index,
end_index=close_char_index + 1,
template=mako_template,
start_delim=start_delim,
end_delim='}',
strings=result['strings'],
)
# restart search after the current expression
start_index = expression.end_index
expressions.append(expression)
return expressions
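# Illustration: scanning "Total: ${amount} USD" yields one Expression with
# start_index=7 (at "${") and end_index=16 (just past the closing "}").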
class DjangoTemplateLinter(BaseLinter):
"""
The linter for Django template files
"""
LINE_COMMENT_DELIM = "{#"
ruleset = RuleSet(
django_trans_missing_escape='django-trans-missing-escape',
django_trans_invalid_escape_filter='django-trans-invalid-escape-filter',
django_trans_escape_variable_mismatch='django-trans-escape-variable-mismatch',
django_blocktrans_missing_escape_filter='django-blocktrans-missing-escape-filter',
django_blocktrans_parse_error='django-blocktrans-parse-error',
django_blocktrans_escape_filter_parse_error='django-blocktrans-escape-filter-parse-error',
django_html_interpolation_missing_safe_filter='django-html-interpolation-missing-safe-filter',
django_html_interpolation_missing='django-html-interpolation-missing',
django_html_interpolation_invalid_tag='django-html-interpolation-invalid-tag',
)
def __init__(self, skip_dirs=None):
"""
Init method.
"""
super().__init__()
self._skip_django_dirs = skip_dirs or ()
def process_file(self, directory, file_name):
"""
Process file to determine if it is a Django template file and
if it is safe.
Arguments:
directory (string): The directory of the file to be checked
file_name (string): A filename for a potential Django file
Returns:
The file results containing any violations.
"""
django_file_full_path = os.path.normpath(directory + '/' + file_name)
results = FileResults(django_file_full_path)
if not results.is_file:
return results
if not self._is_valid_directory(directory):
return results
if not (file_name.lower().endswith('.html')):
return results
return self._load_and_check_file_is_safe(django_file_full_path, self._check_django_file_is_safe, results)
def _is_valid_directory(self, directory):
"""
Determines if the provided directory is a directory that could contain
Django template files that need to be linted.
Arguments:
directory: The directory to be linted.
Returns:
True if this directory should be linted for Django template violations
and False otherwise.
"""
if is_skip_dir(self._skip_django_dirs, directory):
return False
if ('/templates/' in directory) or directory.endswith('/templates'):
return True
return False
def _is_django_template(self, django_template):
"""
Determines if the template is actually a Django template.
Arguments:
django_template: The template code.
Returns:
True if this is really a Django template, and False otherwise.
"""
if re.search('({%.*%})|({{.*}})|({#.*#})', django_template) is not None:
return True
return False
def _check_django_file_is_safe(self, django_template, results):
if not self._is_django_template(django_template):
return
self._check_django_expression(django_template, results)
results.prepare_results(django_template, line_comment_delim=self.LINE_COMMENT_DELIM)
def _check_django_expression(self, django_template, results):
"""
Searches for Django trans and blocktrans expressions and then checks
if they contain violations.
Arguments:
django_template: The contents of the Django template.
results: A list of results into which violations will be added.
"""
expressions = []
self._find_django_expressions(django_template, results, expressions)
for expr in expressions:
expr.validate_expression(django_template, expressions)
def _find_django_expressions(self, django_template, results, expressions):
"""
Finds all the Django trans/blocktrans expressions in a Django template
and creates a list of dicts for each expression.
Arguments:
django_template: The content of the Django template.
results: A list of results into which violations will be added.
expressions: A list to which the found Expressions are appended.
"""
comments = list(re.finditer(r'{% comment .*%}', django_template, re.I))
endcomments = list(re.finditer(r'{% endcomment .*%}', django_template, re.I))
trans_iterator = re.finditer(r'{% trans .*?%}', django_template, re.I)
for t in trans_iterator:
if self._check_expression_not_commented(t, comments, endcomments):
continue
trans_expr = TransExpression(self.ruleset, results, t.start(), t.end(),
start_delim='{%', end_delim='%}',
template=django_template)
if trans_expr:
expressions.append(trans_expr)
block_trans_iterator = re.finditer(r'{% blocktrans .*?%}', django_template, re.I)
for bt in block_trans_iterator:
if self._check_expression_not_commented(bt, comments, endcomments):
continue
trans_expr = BlockTransExpression(self.ruleset, results, bt.start(), bt.end(),
start_delim='{%', end_delim='%}',
template=django_template)
if trans_expr:
expressions.append(trans_expr)
interpolation_iterator = re.finditer(r'{% interpolate_html .*?%}', django_template, re.I)
for it in interpolation_iterator:
if self._check_expression_not_commented(it, comments, endcomments):
continue
trans_expr = HtmlInterpolateExpression(self.ruleset, results,
it.start(), it.end(),
start_delim='{%', end_delim='%}',
template=django_template)
if trans_expr:
expressions.append(trans_expr)
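# Note (added): the helper below treats an expression as commented out when
# it falls between the i-th {% comment %} opener and the i-th
# {% endcomment %} closer, pairing the two lists by position.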
def _check_expression_not_commented(self, expr, comments, endcomments):
for i in range(len(endcomments)):
start_comment = comments[i]
end_comment = endcomments[i]
if (expr.start() >= start_comment.start()) and \
(expr.start() <= end_comment.start()):
return True
| agpl-3.0 |
flying-circus/pyfilesystem | fs/expose/django_storage.py | 7 | 1948 | """
fs.expose.django
================
Use an FS object for Django File Storage
This module exposes the class "FSStorage", a simple adapter for using FS
objects as Django storage objects. Simply include the following lines
in your settings.py::
DEFAULT_FILE_STORAGE = fs.expose.django_storage.FSStorage
DEFAULT_FILE_STORAGE_FS = OSFS('foo/bar') # Or whatever FS
"""
from django.conf import settings
from django.core.files.storage import Storage
from django.core.files import File
from fs.path import abspath, dirname
from fs.errors import convert_fs_errors, ResourceNotFoundError
class FSStorage(Storage):
"""Expose an FS object as a Django File Storage object."""
def __init__(self, fs=None, base_url=None):
"""
:param fs: an FS object
:param base_url: The url to prepend to the path
"""
if fs is None:
fs = settings.DEFAULT_FILE_STORAGE_FS
if base_url is None:
base_url = settings.MEDIA_URL
base_url = base_url.rstrip('/')
self.fs = fs
self.base_url = base_url
def exists(self, name):
return self.fs.isfile(name)
def path(self, name):
path = self.fs.getsyspath(name)
if path is None:
raise NotImplementedError
return path
@convert_fs_errors
def size(self, name):
return self.fs.getsize(name)
@convert_fs_errors
def url(self, name):
return self.base_url + abspath(name)
@convert_fs_errors
def _open(self, name, mode):
return File(self.fs.open(name, mode))
@convert_fs_errors
def _save(self, name, content):
self.fs.makedir(dirname(name), allow_recreate=True, recursive=True)
self.fs.setcontents(name, content)
return name
@convert_fs_errors
def delete(self, name):
try:
self.fs.remove(name)
except ResourceNotFoundError:
pass
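# A minimal usage sketch (assumed; MemoryFS ships with the fs package):
#
#     from fs.memoryfs import MemoryFS
#     from django.core.files.base import ContentFile
#
#     storage = FSStorage(MemoryFS(), base_url='/media')
#     name = storage.save('hello.txt', ContentFile(b'hi'))
#     storage.url(name)        # -> '/media/hello.txt'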
| bsd-3-clause |
normanmaurer/autobahntestsuite-maven-plugin | src/main/resources/twisted/web/test/test_webclient.py | 1 | 38926 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the old L{twisted.web.client} APIs, C{getPage} and friends.
"""
from __future__ import division, absolute_import
import os
from errno import ENOSPC
try:
from urlparse import urlparse, urljoin
except ImportError:
from urllib.parse import urlparse, urljoin
from twisted.python.compat import _PY3, networkString, nativeString, intToBytes
from twisted.trial import unittest
from twisted.web import server, client, error, resource
from twisted.internet import reactor, defer, interfaces
from twisted.python.filepath import FilePath
from twisted.python.log import msg
from twisted.protocols.policies import WrappingFactory
from twisted.test.proto_helpers import StringTransport
try:
from twisted.internet import ssl
except:
ssl = None
from twisted import test
serverPEM = FilePath(test.__file__.encode("utf-8")).sibling(b'server.pem')
serverPEMPath = nativeString(serverPEM.path)
# Remove this in #6177, when static is ported to Python 3:
if _PY3:
from twisted.web.test.test_web import Data
else:
from twisted.web.static import Data
# Remove this in #6178, when util is ported to Python 3:
if _PY3:
class Redirect(resource.Resource):
isLeaf = 1
def __init__(self, url):
resource.Resource.__init__(self)
self.url = url
def render(self, request):
request.redirect(self.url)
return b""
def getChild(self, name, request):
return self
else:
from twisted.web.util import Redirect
_PY3DownloadSkip = "downloadPage will be ported to Python 3 in ticket #6197."
class ExtendedRedirect(resource.Resource):
"""
Redirection resource.
The HTTP status code is set according to the C{code} query parameter.
@type lastMethod: C{str}
@ivar lastMethod: Last handled HTTP request method
"""
isLeaf = 1
lastMethod = None
def __init__(self, url):
resource.Resource.__init__(self)
self.url = url
def render(self, request):
if self.lastMethod:
self.lastMethod = request.method
return b"OK Thnx!"
else:
self.lastMethod = request.method
code = int(request.args[b'code'][0])
return self.redirectTo(self.url, request, code)
def getChild(self, name, request):
return self
def redirectTo(self, url, request, code):
request.setResponseCode(code)
request.setHeader(b"location", url)
return b"OK Bye!"
class ForeverTakingResource(resource.Resource):
"""
L{ForeverTakingResource} is a resource which never finishes responding
to requests.
"""
def __init__(self, write=False):
resource.Resource.__init__(self)
self._write = write
def render(self, request):
if self._write:
request.write(b'some bytes')
return server.NOT_DONE_YET
class CookieMirrorResource(resource.Resource):
def render(self, request):
l = []
for k,v in sorted(list(request.received_cookies.items())):
l.append((nativeString(k), nativeString(v)))
l.sort()
return networkString(repr(l))
class RawCookieMirrorResource(resource.Resource):
def render(self, request):
header = request.getHeader(b'cookie')
if header is None:
return b'None'
return networkString(repr(nativeString(header)))
class ErrorResource(resource.Resource):
def render(self, request):
request.setResponseCode(401)
if request.args.get(b"showlength"):
request.setHeader(b"content-length", b"0")
return b""
class NoLengthResource(resource.Resource):
def render(self, request):
return b"nolength"
class HostHeaderResource(resource.Resource):
"""
A testing resource which renders itself as the value of the host header
from the request.
"""
def render(self, request):
return request.received_headers[b'host']
class PayloadResource(resource.Resource):
"""
A testing resource which renders itself as the contents of the request body
as long as the request body is 100 bytes long; otherwise it renders
itself as C{"ERROR"}.
"""
def render(self, request):
data = request.content.read()
contentLength = request.received_headers[b'content-length']
if len(data) != 100 or int(contentLength) != 100:
return b"ERROR"
return data
class DelayResource(resource.Resource):
def __init__(self, seconds):
self.seconds = seconds
def render(self, request):
def response():
request.write(b'some bytes')
request.finish()
reactor.callLater(self.seconds, response)
return server.NOT_DONE_YET
class BrokenDownloadResource(resource.Resource):
def render(self, request):
# only sends 3 bytes even though it claims to send 5
request.setHeader(b"content-length", b"5")
request.write(b'abc')
return b''
class CountingRedirect(Redirect):
"""
A L{Redirect} resource that keeps track of the number of times the
resource has been accessed.
"""
def __init__(self, *a, **kw):
Redirect.__init__(self, *a, **kw)
self.count = 0
def render(self, request):
self.count += 1
return Redirect.render(self, request)
class CountingResource(resource.Resource):
"""
A resource that keeps track of the number of times it has been accessed.
"""
def __init__(self):
resource.Resource.__init__(self)
self.count = 0
def render(self, request):
self.count += 1
return b"Success"
class ParseUrlTestCase(unittest.TestCase):
"""
Test URL parsing facility and defaults values.
"""
def test_parse(self):
"""
L{client._parse} correctly parses a URL into its various components.
"""
# The default port for HTTP is 80.
self.assertEqual(
client._parse(b'http://127.0.0.1/'),
(b'http', b'127.0.0.1', 80, b'/'))
# The default port for HTTPS is 443.
self.assertEqual(
client._parse(b'https://127.0.0.1/'),
(b'https', b'127.0.0.1', 443, b'/'))
# Specifying a port.
self.assertEqual(
client._parse(b'http://spam:12345/'),
(b'http', b'spam', 12345, b'/'))
# Weird (but commonly accepted) structure uses default port.
self.assertEqual(
client._parse(b'http://spam:/'),
(b'http', b'spam', 80, b'/'))
# Spaces in the hostname are trimmed, the default path is /.
self.assertEqual(
client._parse(b'http://foo '),
(b'http', b'foo', 80, b'/'))
def test_externalUnicodeInterference(self):
"""
L{client._parse} should return C{bytes} for the scheme, host, and path
elements of its return tuple, even when passed an URL which has
previously been passed to L{urlparse} as a C{unicode} string.
"""
badInput = u'http://example.com/path'
goodInput = badInput.encode('ascii')
urlparse(badInput)
scheme, host, port, path = client._parse(goodInput)
self.assertIsInstance(scheme, bytes)
self.assertIsInstance(host, bytes)
self.assertIsInstance(path, bytes)
class URLJoinTests(unittest.TestCase):
"""
Tests for L{client._urljoin}.
"""
def test_noFragments(self):
"""
L{client._urljoin} does not include a fragment identifier in the
resulting URL if neither the base nor the new path include a fragment
identifier.
"""
self.assertEquals(
client._urljoin(b'http://foo.com/bar', b'/quux'),
b'http://foo.com/quux')
self.assertEquals(
client._urljoin(b'http://foo.com/bar#', b'/quux'),
b'http://foo.com/quux')
self.assertEquals(
client._urljoin(b'http://foo.com/bar', b'/quux#'),
b'http://foo.com/quux')
def test_preserveFragments(self):
"""
L{client._urljoin} preserves the fragment identifier from either the
new path or the base URL respectively, as specified in the HTTP 1.1 bis
draft.
@see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-22#section-7.1.2}
"""
self.assertEquals(
client._urljoin(b'http://foo.com/bar#frag', b'/quux'),
b'http://foo.com/quux#frag')
self.assertEquals(
client._urljoin(b'http://foo.com/bar', b'/quux#frag2'),
b'http://foo.com/quux#frag2')
self.assertEquals(
client._urljoin(b'http://foo.com/bar#frag', b'/quux#frag2'),
b'http://foo.com/quux#frag2')
class HTTPPageGetterTests(unittest.TestCase):
"""
Tests for L{HTTPPageGetter}, the HTTP client protocol implementation
used to implement L{getPage}.
"""
def test_earlyHeaders(self):
"""
When a connection is made, L{HTTPPageGetter} sends the headers from
its factory's C{headers} dict. If I{Host} or I{Content-Length} is
present in this dict, the values are not sent, since they are sent with
special values before the C{headers} dict is processed. If
I{User-Agent} is present in the dict, it overrides the value of the
C{agent} attribute of the factory. If I{Cookie} is present in the
dict, its value is added to the values from the factory's C{cookies}
attribute.
"""
factory = client.HTTPClientFactory(
b'http://foo/bar',
agent=b"foobar",
cookies={b'baz': b'quux'},
postdata=b"some data",
headers={
b'Host': b'example.net',
b'User-Agent': b'fooble',
b'Cookie': b'blah blah',
b'Content-Length': b'12981',
b'Useful': b'value'})
transport = StringTransport()
protocol = client.HTTPPageGetter()
protocol.factory = factory
protocol.makeConnection(transport)
result = transport.value()
for expectedHeader in [
b"Host: example.net\r\n",
b"User-Agent: foobar\r\n",
b"Content-Length: 9\r\n",
b"Useful: value\r\n",
b"connection: close\r\n",
b"Cookie: blah blah; baz=quux\r\n"]:
self.assertIn(expectedHeader, result)
class WebClientTestCase(unittest.TestCase):
def _listen(self, site):
return reactor.listenTCP(0, site, interface="127.0.0.1")
def setUp(self):
self.agent = None # for twisted.web.client.Agent test
self.cleanupServerConnections = 0
r = resource.Resource()
r.putChild(b"file", Data(b"0123456789", b"text/html"))
r.putChild(b"redirect", Redirect(b"/file"))
self.infiniteRedirectResource = CountingRedirect(b"/infiniteRedirect")
r.putChild(b"infiniteRedirect", self.infiniteRedirectResource)
r.putChild(b"wait", ForeverTakingResource())
r.putChild(b"write-then-wait", ForeverTakingResource(write=True))
r.putChild(b"error", ErrorResource())
r.putChild(b"nolength", NoLengthResource())
r.putChild(b"host", HostHeaderResource())
r.putChild(b"payload", PayloadResource())
r.putChild(b"broken", BrokenDownloadResource())
r.putChild(b"cookiemirror", CookieMirrorResource())
r.putChild(b'delay1', DelayResource(1))
r.putChild(b'delay2', DelayResource(2))
self.afterFoundGetCounter = CountingResource()
r.putChild(b"afterFoundGetCounter", self.afterFoundGetCounter)
r.putChild(b"afterFoundGetRedirect", Redirect(b"/afterFoundGetCounter"))
miscasedHead = Data(b"miscased-head GET response content", b"major/minor")
miscasedHead.render_Head = lambda request: b"miscased-head content"
r.putChild(b"miscased-head", miscasedHead)
self.extendedRedirect = ExtendedRedirect(b'/extendedRedirect')
r.putChild(b"extendedRedirect", self.extendedRedirect)
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.port = self._listen(self.wrapper)
self.portno = self.port.getHost().port
def tearDown(self):
if self.agent:
# clean up connections for twisted.web.client.Agent test.
self.agent.closeCachedConnections()
self.agent = None
# If the test indicated it might leave some server-side connections
# around, clean them up.
connections = list(self.wrapper.protocols.keys())
# If there are fewer server-side connections than requested,
# that's okay. Some might have noticed that the client closed
# the connection and cleaned up after themselves.
for n in range(min(len(connections), self.cleanupServerConnections)):
proto = connections.pop()
msg("Closing %r" % (proto,))
proto.transport.loseConnection()
if connections:
msg("Some left-over connections; this test is probably buggy.")
return self.port.stopListening()
def getURL(self, path):
host = "http://127.0.0.1:%d/" % self.portno
return networkString(urljoin(host, nativeString(path)))
def testPayload(self):
s = b"0123456789" * 10
return client.getPage(self.getURL("payload"), postdata=s
).addCallback(self.assertEqual, s
)
def test_getPageBrokenDownload(self):
"""
If the connection is closed before the number of bytes indicated by
I{Content-Length} have been received, the L{Deferred} returned by
L{getPage} fails with L{PartialDownloadError}.
"""
d = client.getPage(self.getURL("broken"))
d = self.assertFailure(d, client.PartialDownloadError)
d.addCallback(lambda exc: self.assertEqual(exc.response, b"abc"))
return d
def test_downloadPageBrokenDownload(self):
"""
If the connection is closed before the number of bytes indicated by
I{Content-Length} have been received, the L{Deferred} returned by
L{downloadPage} fails with L{PartialDownloadError}.
"""
# test what happens when download gets disconnected in the middle
path = FilePath(self.mktemp())
d = client.downloadPage(self.getURL("broken"), path.path)
d = self.assertFailure(d, client.PartialDownloadError)
def checkResponse(response):
"""
The HTTP status code from the server is propagated through the
C{PartialDownloadError}.
"""
self.assertEqual(response.status, b"200")
self.assertEqual(response.message, b"OK")
return response
d.addCallback(checkResponse)
def cbFailed(ignored):
self.assertEqual(path.getContent(), b"abc")
d.addCallback(cbFailed)
return d
def test_downloadPageLogsFileCloseError(self):
"""
If there is an exception closing the file being written to after the
connection is prematurely closed, that exception is logged.
"""
class BrokenFile:
def write(self, bytes):
pass
def close(self):
raise IOError(ENOSPC, "No file left on device")
d = client.downloadPage(self.getURL("broken"), BrokenFile())
d = self.assertFailure(d, client.PartialDownloadError)
def cbFailed(ignored):
self.assertEqual(len(self.flushLoggedErrors(IOError)), 1)
d.addCallback(cbFailed)
return d
def testHostHeader(self):
# if we pass Host header explicitly, it should be used, otherwise
# it should extract from url
return defer.gatherResults([
client.getPage(self.getURL("host")).addCallback(
self.assertEqual, b"127.0.0.1:" + intToBytes(self.portno)),
client.getPage(self.getURL("host"),
headers={b"Host": b"www.example.com"}).addCallback(
self.assertEqual, b"www.example.com")])
def test_getPage(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the body of the response if the default method B{GET} is used.
"""
d = client.getPage(self.getURL("file"))
d.addCallback(self.assertEqual, b"0123456789")
return d
def test_getPageHEAD(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the empty string if the method is I{HEAD} and there is a successful
response code.
"""
d = client.getPage(self.getURL("file"), method=b"HEAD")
d.addCallback(self.assertEqual, b"")
return d
def test_getPageNotQuiteHEAD(self):
"""
If the request method is a different casing of I{HEAD} (ie, not all
capitalized) then it is not a I{HEAD} request and the response body
is returned.
"""
d = client.getPage(self.getURL("miscased-head"), method=b'Head')
d.addCallback(self.assertEqual, b"miscased-head content")
return d
def test_timeoutNotTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and the page is
retrieved before the timeout period elapses, the L{Deferred} is
called back with the contents of the page.
"""
d = client.getPage(self.getURL("host"), timeout=100)
d.addCallback(self.assertEqual,
networkString("127.0.0.1:%s" % (self.portno,)))
return d
def test_timeoutTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and that many
seconds elapse before the server responds to the request, the
L{Deferred} is errbacked with a L{error.TimeoutError}.
"""
# This will probably leave some connections around.
self.cleanupServerConnections = 1
return self.assertFailure(
client.getPage(self.getURL("wait"), timeout=0.000001),
defer.TimeoutError)
def testDownloadPage(self):
downloads = []
downloadData = [(b"file", self.mktemp(), b"0123456789"),
(b"nolength", self.mktemp(), b"nolength")]
for (url, name, data) in downloadData:
d = client.downloadPage(self.getURL(url), name)
d.addCallback(self._cbDownloadPageTest, data, name)
downloads.append(d)
return defer.gatherResults(downloads)
def _cbDownloadPageTest(self, ignored, data, name):
bytes = open(name, "rb").read()
self.assertEqual(bytes, data)
def testDownloadPageError1(self):
class errorfile:
def write(self, data):
raise IOError("badness happened during write")
def close(self):
pass
ef = errorfile()
return self.assertFailure(
client.downloadPage(self.getURL("file"), ef),
IOError)
def testDownloadPageError2(self):
class errorfile:
def write(self, data):
pass
def close(self):
raise IOError("badness happened during close")
ef = errorfile()
return self.assertFailure(
client.downloadPage(self.getURL("file"), ef),
IOError)
def testDownloadPageError3(self):
# make sure failures in open() are caught too. This is tricky.
# Might only work on posix.
tmpfile = open("unwritable", "wb")
tmpfile.close()
os.chmod("unwritable", 0) # make it unwritable (to us)
d = self.assertFailure(
client.downloadPage(self.getURL("file"), "unwritable"),
IOError)
d.addBoth(self._cleanupDownloadPageError3)
return d
def _cleanupDownloadPageError3(self, ignored):
os.chmod("unwritable", 0o700)
os.unlink("unwritable")
return ignored
def _downloadTest(self, method):
dl = []
for (url, code) in [("nosuchfile", b"404"), ("error", b"401"),
("error?showlength=1", b"401")]:
d = method(url)
d = self.assertFailure(d, error.Error)
d.addCallback(lambda exc, code=code: self.assertEqual(exc.args[0], code))
dl.append(d)
return defer.DeferredList(dl, fireOnOneErrback=True)
def testServerError(self):
return self._downloadTest(lambda url: client.getPage(self.getURL(url)))
def testDownloadServerError(self):
return self._downloadTest(lambda url: client.downloadPage(self.getURL(url), url.split('?')[0]))
def testFactoryInfo(self):
url = self.getURL('file')
scheme, host, port, path = client._parse(url)
factory = client.HTTPClientFactory(url)
reactor.connectTCP(nativeString(host), port, factory)
return factory.deferred.addCallback(self._cbFactoryInfo, factory)
def _cbFactoryInfo(self, ignoredResult, factory):
self.assertEqual(factory.status, b'200')
self.assert_(factory.version.startswith(b'HTTP/'))
self.assertEqual(factory.message, b'OK')
self.assertEqual(factory.response_headers[b'content-length'][0], b'10')
def test_followRedirect(self):
"""
By default, L{client.getPage} follows redirects and returns the content
of the target resource.
"""
d = client.getPage(self.getURL("redirect"))
d.addCallback(self.assertEqual, b"0123456789")
return d
def test_noFollowRedirect(self):
"""
If C{followRedirect} is passed a false value, L{client.getPage} does not
follow redirects and returns a L{Deferred} which fails with
L{error.PageRedirect} when it encounters one.
"""
d = self.assertFailure(
client.getPage(self.getURL("redirect"), followRedirect=False),
error.PageRedirect)
d.addCallback(self._cbCheckLocation)
return d
def _cbCheckLocation(self, exc):
self.assertEqual(exc.location, b"/file")
def test_infiniteRedirection(self):
"""
When more than C{redirectLimit} HTTP redirects are encountered, the
page request fails with L{InfiniteRedirection}.
"""
def checkRedirectCount(*a):
self.assertEqual(f._redirectCount, 13)
self.assertEqual(self.infiniteRedirectResource.count, 13)
f = client._makeGetterFactory(
self.getURL('infiniteRedirect'),
client.HTTPClientFactory,
redirectLimit=13)
d = self.assertFailure(f.deferred, error.InfiniteRedirection)
d.addCallback(checkRedirectCount)
return d
def test_isolatedFollowRedirect(self):
"""
C{client.HTTPPageGetter} instances each obey the C{followRedirect}
value passed to the L{client.getPage} call which created them.
"""
d1 = client.getPage(self.getURL('redirect'), followRedirect=True)
d2 = client.getPage(self.getURL('redirect'), followRedirect=False)
d = self.assertFailure(d2, error.PageRedirect
).addCallback(lambda dummy: d1)
return d
def test_afterFoundGet(self):
"""
Enabling unsafe redirection behaviour overwrites the method of
redirected C{POST} requests with C{GET}.
"""
url = self.getURL('extendedRedirect?code=302')
f = client.HTTPClientFactory(url, followRedirect=True, method=b"POST")
self.assertFalse(
f.afterFoundGet,
"By default, afterFoundGet must be disabled")
def gotPage(page):
self.assertEqual(
self.extendedRedirect.lastMethod,
b"GET",
"With afterFoundGet, the HTTP method must change to GET")
d = client.getPage(
url, followRedirect=True, afterFoundGet=True, method=b"POST")
d.addCallback(gotPage)
return d
def test_downloadAfterFoundGet(self):
"""
Passing C{True} for C{afterFoundGet} to L{client.downloadPage} invokes
the same kind of redirect handling as passing that argument to
L{client.getPage} invokes.
"""
url = self.getURL('extendedRedirect?code=302')
def gotPage(page):
self.assertEqual(
self.extendedRedirect.lastMethod,
b"GET",
"With afterFoundGet, the HTTP method must change to GET")
d = client.downloadPage(url, "downloadTemp",
followRedirect=True, afterFoundGet=True, method="POST")
d.addCallback(gotPage)
return d
def test_afterFoundGetMakesOneRequest(self):
"""
When C{afterFoundGet} is C{True}, L{client.getPage} only issues one
request to the server when following the redirect. This is a regression
test, see #4760.
"""
def checkRedirectCount(*a):
self.assertEqual(self.afterFoundGetCounter.count, 1)
url = self.getURL('afterFoundGetRedirect')
d = client.getPage(
url, followRedirect=True, afterFoundGet=True, method=b"POST")
d.addCallback(checkRedirectCount)
return d
def testPartial(self):
name = self.mktemp()
f = open(name, "wb")
f.write(b"abcd")
f.close()
partialDownload = [(True, b"abcd456789"),
(True, b"abcd456789"),
(False, b"0123456789")]
d = defer.succeed(None)
for (partial, expectedData) in partialDownload:
d.addCallback(self._cbRunPartial, name, partial)
d.addCallback(self._cbPartialTest, expectedData, name)
return d
testPartial.skip = "Cannot test until webserver can serve partial data properly"
def _cbRunPartial(self, ignored, name, partial):
return client.downloadPage(self.getURL("file"), name, supportPartial=partial)
def _cbPartialTest(self, ignored, expectedData, filename):
bytes = open(filename, "rb").read()
self.assertEqual(bytes, expectedData)
def test_downloadTimeout(self):
"""
If the timeout indicated by the C{timeout} parameter to
L{client.HTTPDownloader.__init__} elapses without the complete response
being received, the L{defer.Deferred} returned by
L{client.downloadPage} fires with a L{Failure} wrapping a
L{defer.TimeoutError}.
"""
self.cleanupServerConnections = 2
# Verify the behavior if no bytes are ever written.
first = client.downloadPage(
self.getURL("wait"),
self.mktemp(), timeout=0.01)
# Verify the behavior if some bytes are written but then the request
# never completes.
second = client.downloadPage(
self.getURL("write-then-wait"),
self.mktemp(), timeout=0.01)
return defer.gatherResults([
self.assertFailure(first, defer.TimeoutError),
self.assertFailure(second, defer.TimeoutError)])
def test_downloadHeaders(self):
"""
After L{client.HTTPDownloader.deferred} fires, the
L{client.HTTPDownloader} instance's C{status} and C{response_headers}
attributes are populated with the values from the response.
"""
def checkHeaders(factory):
self.assertEqual(factory.status, b'200')
self.assertEqual(factory.response_headers[b'content-type'][0], b'text/html')
self.assertEqual(factory.response_headers[b'content-length'][0], b'10')
os.unlink(factory.fileName)
factory = client._makeGetterFactory(
self.getURL('file'),
client.HTTPDownloader,
fileOrName=self.mktemp())
return factory.deferred.addCallback(lambda _: checkHeaders(factory))
def test_downloadCookies(self):
"""
The C{cookies} dict passed to the L{client.HTTPDownloader}
initializer is used to populate the I{Cookie} header included in the
request sent to the server.
"""
output = self.mktemp()
factory = client._makeGetterFactory(
self.getURL('cookiemirror'),
client.HTTPDownloader,
fileOrName=output,
cookies={b'foo': b'bar'})
def cbFinished(ignored):
self.assertEqual(
FilePath(output).getContent(),
"[('foo', 'bar')]")
factory.deferred.addCallback(cbFinished)
return factory.deferred
def test_downloadRedirectLimit(self):
"""
When more than C{redirectLimit} HTTP redirects are encountered, the
page request fails with L{InfiniteRedirection}.
"""
def checkRedirectCount(*a):
self.assertEqual(f._redirectCount, 7)
self.assertEqual(self.infiniteRedirectResource.count, 7)
f = client._makeGetterFactory(
self.getURL('infiniteRedirect'),
client.HTTPDownloader,
fileOrName=self.mktemp(),
redirectLimit=7)
d = self.assertFailure(f.deferred, error.InfiniteRedirection)
d.addCallback(checkRedirectCount)
return d
if _PY3:
for method in (
test_downloadPageBrokenDownload,
test_downloadPageLogsFileCloseError,
testDownloadPage,
testDownloadPageError1,
testDownloadPageError2,
testDownloadPageError3,
testDownloadServerError,
test_downloadAfterFoundGet,
testPartial,
test_downloadTimeout,
test_downloadHeaders,
test_downloadCookies,
test_downloadRedirectLimit):
method.skip = _PY3DownloadSkip
del method
class WebClientSSLTestCase(WebClientTestCase):
def _listen(self, site):
return reactor.listenSSL(
0, site,
contextFactory=ssl.DefaultOpenSSLContextFactory(
serverPEMPath, serverPEMPath),
interface="127.0.0.1")
def getURL(self, path):
return networkString("https://127.0.0.1:%d/%s" % (self.portno, path))
def testFactoryInfo(self):
url = self.getURL('file')
scheme, host, port, path = client._parse(url)
factory = client.HTTPClientFactory(url)
reactor.connectSSL(nativeString(host), port, factory,
ssl.ClientContextFactory())
# The base class defines _cbFactoryInfo correctly for this
return factory.deferred.addCallback(self._cbFactoryInfo, factory)
class WebClientRedirectBetweenSSLandPlainText(unittest.TestCase):
def getHTTPS(self, path):
return networkString("https://127.0.0.1:%d/%s" % (self.tlsPortno, path))
def getHTTP(self, path):
return networkString("http://127.0.0.1:%d/%s" % (self.plainPortno, path))
def setUp(self):
plainRoot = Data(b'not me', b'text/plain')
tlsRoot = Data(b'me neither', b'text/plain')
plainSite = server.Site(plainRoot, timeout=None)
tlsSite = server.Site(tlsRoot, timeout=None)
self.tlsPort = reactor.listenSSL(
0, tlsSite,
contextFactory=ssl.DefaultOpenSSLContextFactory(
serverPEMPath, serverPEMPath),
interface="127.0.0.1")
self.plainPort = reactor.listenTCP(0, plainSite, interface="127.0.0.1")
self.plainPortno = self.plainPort.getHost().port
self.tlsPortno = self.tlsPort.getHost().port
plainRoot.putChild(b'one', Redirect(self.getHTTPS('two')))
tlsRoot.putChild(b'two', Redirect(self.getHTTP('three')))
plainRoot.putChild(b'three', Redirect(self.getHTTPS('four')))
tlsRoot.putChild(b'four', Data(b'FOUND IT!', b'text/plain'))
def tearDown(self):
ds = list(
map(defer.maybeDeferred,
[self.plainPort.stopListening, self.tlsPort.stopListening]))
return defer.gatherResults(ds)
def testHoppingAround(self):
return client.getPage(self.getHTTP("one")
).addCallback(self.assertEqual, b"FOUND IT!"
)
class CookieTestCase(unittest.TestCase):
def _listen(self, site):
return reactor.listenTCP(0, site, interface="127.0.0.1")
def setUp(self):
root = Data(b'El toro!', b'text/plain')
root.putChild(b"cookiemirror", CookieMirrorResource())
root.putChild(b"rawcookiemirror", RawCookieMirrorResource())
site = server.Site(root, timeout=None)
self.port = self._listen(site)
self.portno = self.port.getHost().port
def tearDown(self):
return self.port.stopListening()
def getHTTP(self, path):
return networkString("http://127.0.0.1:%d/%s" % (self.portno, path))
def testNoCookies(self):
return client.getPage(self.getHTTP("cookiemirror")
).addCallback(self.assertEqual, b"[]"
)
def testSomeCookies(self):
cookies = {b'foo': b'bar', b'baz': b'quux'}
return client.getPage(self.getHTTP("cookiemirror"), cookies=cookies
).addCallback(self.assertEqual, b"[('baz', 'quux'), ('foo', 'bar')]"
)
def testRawNoCookies(self):
return client.getPage(self.getHTTP("rawcookiemirror")
).addCallback(self.assertEqual, b"None"
)
def testRawSomeCookies(self):
cookies = {b'foo': b'bar', b'baz': b'quux'}
return client.getPage(self.getHTTP("rawcookiemirror"), cookies=cookies
).addCallback(self.assertIn,
(b"'foo=bar; baz=quux'", b"'baz=quux; foo=bar'")
)
def testCookieHeaderParsing(self):
factory = client.HTTPClientFactory(b'http://foo.example.com/')
proto = factory.buildProtocol('127.42.42.42')
transport = StringTransport()
proto.makeConnection(transport)
for line in [
b'200 Ok',
b'Squash: yes',
b'Hands: stolen',
b'Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT',
b'Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/',
b'Set-Cookie: SHIPPING=FEDEX; path=/foo',
b'',
b'body',
b'more body',
]:
proto.dataReceived(line + b'\r\n')
self.assertEqual(transport.value(),
b'GET / HTTP/1.0\r\n'
b'Host: foo.example.com\r\n'
b'User-Agent: Twisted PageGetter\r\n'
b'\r\n')
self.assertEqual(factory.cookies,
{
b'CUSTOMER': b'WILE_E_COYOTE',
b'PART_NUMBER': b'ROCKET_LAUNCHER_0001',
b'SHIPPING': b'FEDEX',
})
class TestHostHeader(unittest.TestCase):
"""
Test that L{HTTPClientFactory} includes the port in the host header
if needed.
"""
def _getHost(self, bytes):
"""
Retrieve the value of the I{Host} header from the serialized
request given by C{bytes}.
"""
for line in bytes.split(b'\r\n'):
try:
name, value = line.split(b':', 1)
if name.strip().lower() == b'host':
return value.strip()
except ValueError:
pass
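# e.g. _getHost(b'GET / HTTP/1.0\r\nHost: example.com:8080\r\n\r\n')
# returns b'example.com:8080'; request lines without a colon are skipped
# via the ValueError handler above.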
def test_HTTPDefaultPort(self):
"""
No port should be included in the host header when connecting to the
default HTTP port.
"""
factory = client.HTTPClientFactory(b'http://foo.example.com/')
proto = factory.buildProtocol(b'127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com')
def test_HTTPPort80(self):
"""
No port should be included in the host header when connecting to the
default HTTP port even if it is in the URL.
"""
factory = client.HTTPClientFactory(b'http://foo.example.com:80/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com')
def test_HTTPNotPort80(self):
"""
The port should be included in the host header when connecting to
a non-default HTTP port.
"""
factory = client.HTTPClientFactory(b'http://foo.example.com:8080/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com:8080')
def test_HTTPSDefaultPort(self):
"""
No port should be included in the host header when connecting to the
default HTTPS port.
"""
factory = client.HTTPClientFactory(b'https://foo.example.com/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com')
def test_HTTPSPort443(self):
"""
No port should be included in the host header when connecting to the
default HTTPS port even if it is in the URL.
"""
factory = client.HTTPClientFactory(b'https://foo.example.com:443/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com')
def test_HTTPSNotPort443(self):
"""
The port should be included in the host header when connecting to
a non-default HTTPS port.
"""
factory = client.HTTPClientFactory(b'https://foo.example.com:8080/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com:8080')
if ssl is None or not hasattr(ssl, 'DefaultOpenSSLContextFactory'):
for case in [WebClientSSLTestCase, WebClientRedirectBetweenSSLandPlainText]:
case.skip = "OpenSSL not present"
if not interfaces.IReactorSSL(reactor, None):
for case in [WebClientSSLTestCase, WebClientRedirectBetweenSSLandPlainText]:
case.skip = "Reactor doesn't support SSL"
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/250_test_posix.py | 7 | 6643 | "Test posix functions"
from test import test_support
try:
import posix
except ImportError:
raise test_support.TestSkipped, "posix is not available"
import time
import os
import sys
import unittest
import warnings
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
class PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(test_support.TESTFN, 'w+')
fp.close()
def tearDown(self):
os.unlink(test_support.TESTFN)
def testNoArgFunctions(self):
# test posix functions which take no arguments and have
# no side-effects which we need to cleanup (e.g., fork, wait, abort)
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdu", "uname",
"times", "getloadavg", "tmpnam",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid",
]
for name in NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, None)
if posix_func is not None:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
def test_statvfs(self):
if hasattr(posix, 'statvfs'):
self.assert_(posix.statvfs(os.curdir))
def test_fstatvfs(self):
if hasattr(posix, 'fstatvfs'):
fp = open(test_support.TESTFN)
try:
self.assert_(posix.fstatvfs(fp.fileno()))
finally:
fp.close()
def test_ftruncate(self):
if hasattr(posix, 'ftruncate'):
fp = open(test_support.TESTFN, 'w+')
try:
# we need to have some data to truncate
fp.write('test')
fp.flush()
posix.ftruncate(fp.fileno(), 0)
finally:
fp.close()
def test_dup(self):
if hasattr(posix, 'dup'):
fp = open(test_support.TESTFN)
try:
fd = posix.dup(fp.fileno())
self.assert_(isinstance(fd, int))
os.close(fd)
finally:
fp.close()
def test_confstr(self):
if hasattr(posix, 'confstr'):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
def test_dup2(self):
if hasattr(posix, 'dup2'):
fp1 = open(test_support.TESTFN)
fp2 = open(test_support.TESTFN)
try:
posix.dup2(fp1.fileno(), fp2.fileno())
finally:
fp1.close()
fp2.close()
def fdopen_helper(self, *args):
fd = os.open(test_support.TESTFN, os.O_RDONLY)
fp2 = posix.fdopen(fd, *args)
fp2.close()
def test_fdopen(self):
if hasattr(posix, 'fdopen'):
self.fdopen_helper()
self.fdopen_helper('r')
self.fdopen_helper('r', 100)
def test_osexlock(self):
if hasattr(posix, "O_EXLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
if hasattr(posix, "O_SHLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
def test_osshlock(self):
if hasattr(posix, "O_SHLOCK"):
fd1 = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
fd2 = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
os.close(fd2)
os.close(fd1)
if hasattr(posix, "O_EXLOCK"):
fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
def test_fstat(self):
if hasattr(posix, 'fstat'):
fp = open(test_support.TESTFN)
try:
self.assert_(posix.fstat(fp.fileno()))
finally:
fp.close()
def test_stat(self):
if hasattr(posix, 'stat'):
self.assert_(posix.stat(test_support.TESTFN))
def test_chdir(self):
if hasattr(posix, 'chdir'):
posix.chdir(os.curdir)
self.assertRaises(OSError, posix.chdir, test_support.TESTFN)
def test_listdir(self):
if hasattr(posix, 'listdir'):
self.assert_(test_support.TESTFN in posix.listdir(os.curdir))
def test_access(self):
if hasattr(posix, 'access'):
self.assert_(posix.access(test_support.TESTFN, os.R_OK))
def test_umask(self):
if hasattr(posix, 'umask'):
old_mask = posix.umask(0)
self.assert_(isinstance(old_mask, int))
posix.umask(old_mask)
def test_strerror(self):
if hasattr(posix, 'strerror'):
self.assert_(posix.strerror(0))
def test_pipe(self):
if hasattr(posix, 'pipe'):
reader, writer = posix.pipe()
os.close(reader)
os.close(writer)
def test_tempnam(self):
if hasattr(posix, 'tempnam'):
self.assert_(posix.tempnam())
self.assert_(posix.tempnam(os.curdir))
self.assert_(posix.tempnam(os.curdir, 'blah'))
def test_tmpfile(self):
if hasattr(posix, 'tmpfile'):
fp = posix.tmpfile()
fp.close()
def test_utime(self):
if hasattr(posix, 'utime'):
now = time.time()
posix.utime(test_support.TESTFN, None)
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, None))
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (now, None))
self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, now))
posix.utime(test_support.TESTFN, (int(now), int(now)))
posix.utime(test_support.TESTFN, (now, now))
def test_main():
test_support.run_unittest(PosixTester)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
beni55/sympy | sympy/plotting/pygletplot/__init__.py | 120 | 4266 | """Plotting module that can plot 2D and 3D functions
"""
from sympy.utilities.decorator import doctest_depends_on
try:
@doctest_depends_on(modules=('pyglet',))
def PygletPlot(*args, **kwargs):
"""
Plot Examples
=============
See examples/advanced/pyglet_plotting.py for many more examples.
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import symbols
>>> from sympy.abc import x, y, z
>>> Plot(x*y**3-y*x**3)
[0]: -x**3*y + x*y**3, 'mode=cartesian'
>>> p = Plot()
>>> p[1] = x*y
>>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)
>>> p = Plot()
>>> p[1] = x**2+y**2
>>> p[2] = -x**2-y**2
Variable Intervals
==================
The basic format is [var, min, max, steps], but the
syntax is flexible and arguments left out are taken
from the defaults for the current coordinate mode:
>>> Plot(x**2) # implies [x,-5,5,100]
[0]: x**2, 'mode=cartesian'
>>> Plot(x**2, [], []) # [x,-1,1,40], [y,-1,1,40]
[0]: x**2, 'mode=cartesian'
>>> Plot(x**2-y**2, [100], [100]) # [x,-1,1,100], [y,-1,1,100]
[0]: x**2 - y**2, 'mode=cartesian'
>>> Plot(x**2, [x,-13,13,100])
[0]: x**2, 'mode=cartesian'
>>> Plot(x**2, [-13,13]) # [x,-13,13,100]
[0]: x**2, 'mode=cartesian'
>>> Plot(x**2, [x,-13,13]) # [x,-13,13,100]
[0]: x**2, 'mode=cartesian'
>>> Plot(1*x, [], [x], mode='cylindrical')
... # [unbound_theta,0,2*Pi,40], [x,-1,1,20]
[0]: x, 'mode=cartesian'
Coordinate Modes
================
Plot supports several curvilinear coordinate modes, and
they are independent for each plotted function. You can specify
a coordinate mode explicitly with the 'mode' named argument,
but it can be automatically determined for Cartesian or
parametric plots, and therefore must only be specified for
polar, cylindrical, and spherical modes.
Specifically, Plot(function arguments) and Plot[n] =
(function arguments) will interpret your arguments as a
Cartesian plot if you provide one function and a parametric
plot if you provide two or three functions. Similarly, the
arguments will be interpreted as a curve if one variable is
used, and a surface if two are used.
Supported mode names by number of variables:
1: parametric, cartesian, polar
2: parametric, cartesian, cylindrical = polar, spherical
>>> Plot(1, mode='spherical') # doctest: +SKIP
Calculator-like Interface
=========================
>>> p = Plot(visible=False)
>>> f = x**2
>>> p[1] = f
>>> p[2] = f.diff(x)
>>> p[3] = f.diff(x).diff(x) # doctest: +SKIP
>>> p # doctest: +SKIP
[1]: x**2, 'mode=cartesian'
[2]: 2*x, 'mode=cartesian'
[3]: 2, 'mode=cartesian'
>>> p.show()
>>> p.clear()
>>> p
<blank plot>
>>> p[1] = x**2+y**2
>>> p[1].style = 'solid'
>>> p[2] = -x**2-y**2
>>> p[2].style = 'wireframe'
>>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)
>>> p[1].style = 'both'
>>> p[2].style = 'both'
>>> p.close()
Plot Window Keyboard Controls
=============================
Screen Rotation:
X,Y axis Arrow Keys, A,S,D,W, Numpad 4,6,8,2
Z axis Q,E, Numpad 7,9
Model Rotation:
Z axis Z,C, Numpad 1,3
Zoom: R,F, PgUp,PgDn, Numpad +,-
Reset Camera: X, Numpad 5
Camera Presets:
XY F1
XZ F2
YZ F3
Perspective F4
Sensitivity Modifier: SHIFT
Axes Toggle:
Visible F5
Colors F6
Close Window: ESCAPE
=============================
"""
import plot
return plot.PygletPlot(*args, **kwargs)
except Exception as e:
def PygletPlot(*args, **kwargs):
raise e
| bsd-3-clause |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/webdesign/tests.py | 379 | 1054 | # -*- coding: utf-8 -*-
import unittest
from django.contrib.webdesign.lorem_ipsum import *
from django.template import loader, Context
class WebdesignTest(unittest.TestCase):
def test_words(self):
self.assertEqual(words(7), u'lorem ipsum dolor sit amet consectetur adipisicing')
def test_paragraphs(self):
self.assertEqual(paragraphs(1),
['Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'])
def test_lorem_tag(self):
t = loader.get_template_from_string("{% load webdesign %}{% lorem 3 w %}")
self.assertEqual(t.render(Context({})),
u'lorem ipsum dolor')
| bsd-3-clause |
beeftornado/sentry | tests/sentry/api/serializers/test_debugfile.py | 3 | 2569 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.api.serializers import serialize
from sentry.testutils import TestCase
class DebugFileSerializerTest(TestCase):
def test_simple(self):
file = self.create_file(
name="baz.dSYM",
size=42,
headers={"Content-Type": "application/x-mach-binary"},
checksum="dc1e3f3e411979d336c3057cce64294f3420f93a",
)
dif = self.create_dif_file(
debug_id="dfb8e43a-f242-3d73-a453-aeb6a777ef75",
code_id="DFB8E43AF2423D73A453AEB6A777EF75",
object_name="baz.dSYM",
cpu_name="x86_64",
file=file,
data={"features": ["debug"]},
)
result = serialize(dif)
result.pop("id")
result.pop("dateCreated")
assert result == {
"uuid": "dfb8e43a-f242-3d73-a453-aeb6a777ef75",
"debugId": "dfb8e43a-f242-3d73-a453-aeb6a777ef75",
"codeId": "DFB8E43AF2423D73A453AEB6A777EF75",
"cpuName": "x86_64",
"objectName": "baz.dSYM",
"symbolType": "macho",
"size": 42,
"sha1": "dc1e3f3e411979d336c3057cce64294f3420f93a",
"headers": {"Content-Type": "application/x-mach-binary"},
"data": {"features": ["debug"]},
}
def test_long_debug_id(self):
file = self.create_file(
name="baz.dSYM",
size=42,
headers={"Content-Type": "application/x-mach-binary"},
checksum="dc1e3f3e411979d336c3057cce64294f3420f93a",
)
dif = self.create_dif_file(
debug_id="dfb8e43a-f242-3d73-a453-aeb6a777ef75-feedface",
code_id="DFB8E43AF2423D73A453AEB6A777EF75feedface",
object_name="baz.dSYM",
cpu_name="x86_64",
file=file,
data={"features": ["debug"]},
)
result = serialize(dif)
result.pop("id")
result.pop("dateCreated")
assert result == {
"uuid": "dfb8e43a-f242-3d73-a453-aeb6a777ef75",
"debugId": "dfb8e43a-f242-3d73-a453-aeb6a777ef75-feedface",
"codeId": "DFB8E43AF2423D73A453AEB6A777EF75feedface",
"cpuName": "x86_64",
"objectName": "baz.dSYM",
"symbolType": "macho",
"size": 42,
"sha1": "dc1e3f3e411979d336c3057cce64294f3420f93a",
"headers": {"Content-Type": "application/x-mach-binary"},
"data": {"features": ["debug"]},
}
| bsd-3-clause |
caotianwei/django | tests/forms_tests/tests/test_error_messages.py | 169 | 11182 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, DateField, DateTimeField,
DecimalField, EmailField, FileField, FloatField, Form,
GenericIPAddressField, IntegerField, ModelChoiceField,
ModelMultipleChoiceField, MultipleChoiceField, RegexField,
SplitDateTimeField, TimeField, URLField, ValidationError, utils,
)
from django.test import SimpleTestCase, TestCase
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
class AssertFormErrorsMixin(object):
def assertFormErrors(self, expected, the_callable, *args, **kwargs):
try:
the_callable(*args, **kwargs)
self.fail("Testing the 'clean' method on %s failed to raise a ValidationError.")
except ValidationError as e:
self.assertEqual(e.messages, expected)
class FormsErrorMessagesTestCase(SimpleTestCase, AssertFormErrorsMixin):
def test_charfield(self):
e = {
'required': 'REQUIRED',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = CharField(min_length=5, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
def test_integerfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
}
f = IntegerField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
def test_floatfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
}
f = FloatField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
def test_decimalfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
'max_digits': 'MAX DIGITS IS %(max)s',
'max_decimal_places': 'MAX DP IS %(max)s',
'max_whole_digits': 'MAX DIGITS BEFORE DP IS %(max)s',
}
f = DecimalField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors(['MAX VALUE IS 10'], f.clean, '11')
f2 = DecimalField(max_digits=4, decimal_places=2, error_messages=e)
self.assertFormErrors(['MAX DIGITS IS 4'], f2.clean, '123.45')
self.assertFormErrors(['MAX DP IS 2'], f2.clean, '1.234')
self.assertFormErrors(['MAX DIGITS BEFORE DP IS 2'], f2.clean, '123.4')
def test_datefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = DateField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_timefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = TimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_datetimefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = DateTimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
def test_regexfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = RegexField(r'^[0-9]+$', min_length=5, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abcde')
self.assertFormErrors(['LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
def test_emailfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = EmailField(min_length=8, max_length=10, error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abcdefgh')
self.assertFormErrors(['LENGTH 7, MIN LENGTH 8'], f.clean, 'a@b.com')
self.assertFormErrors(['LENGTH 11, MAX LENGTH 10'], f.clean, 'aye@bee.com')
def test_filefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'missing': 'MISSING',
'empty': 'EMPTY FILE',
}
f = FileField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc')
self.assertFormErrors(['EMPTY FILE'], f.clean, SimpleUploadedFile('name', None))
self.assertFormErrors(['EMPTY FILE'], f.clean, SimpleUploadedFile('name', ''))
def test_urlfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'max_length': '"%(value)s" has more than %(limit_value)d characters.',
}
f = URLField(error_messages=e, max_length=17)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID'], f.clean, 'abc.c')
self.assertFormErrors(['"http://djangoproject.com" has more than 17 characters.'], f.clean, 'djangoproject.com')
def test_booleanfield(self):
e = {
'required': 'REQUIRED',
}
f = BooleanField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
def test_choicefield(self):
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
}
f = ChoiceField(choices=[('a', 'aye')], error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['b IS INVALID CHOICE'], f.clean, 'b')
def test_multiplechoicefield(self):
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
'invalid_list': 'NOT A LIST',
}
f = MultipleChoiceField(choices=[('a', 'aye')], error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['NOT A LIST'], f.clean, 'b')
self.assertFormErrors(['b IS INVALID CHOICE'], f.clean, ['b'])
def test_splitdatetimefield(self):
e = {
'required': 'REQUIRED',
'invalid_date': 'INVALID DATE',
'invalid_time': 'INVALID TIME',
}
f = SplitDateTimeField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID DATE', 'INVALID TIME'], f.clean, ['a', 'b'])
def test_generic_ipaddressfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID IP ADDRESS',
}
f = GenericIPAddressField(error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID IP ADDRESS'], f.clean, '127.0.0')
def test_subclassing_errorlist(self):
class TestForm(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def clean(self):
raise ValidationError("I like to be awkward.")
@python_2_unicode_compatible
class CustomErrorList(utils.ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ''
return mark_safe('<div class="error">%s</div>' % ''.join('<p>%s</p>' % e for e in self))
# This form should print errors the default way.
form1 = TestForm({'first_name': 'John'})
self.assertHTMLEqual(str(form1['last_name'].errors), '<ul class="errorlist"><li>This field is required.</li></ul>')
self.assertHTMLEqual(str(form1.errors['__all__']), '<ul class="errorlist nonfield"><li>I like to be awkward.</li></ul>')
# This one should wrap error groups in the customized way.
form2 = TestForm({'first_name': 'John'}, error_class=CustomErrorList)
self.assertHTMLEqual(str(form2['last_name'].errors), '<div class="error"><p>This field is required.</p></div>')
self.assertHTMLEqual(str(form2.errors['__all__']), '<div class="error"><p>I like to be awkward.</p></div>')
class ModelChoiceFieldErrorMessagesTestCase(TestCase, AssertFormErrorsMixin):
def test_modelchoicefield(self):
# Create choices for the model choice field tests below.
from forms_tests.models import ChoiceModel
ChoiceModel.objects.create(pk=1, name='a')
ChoiceModel.objects.create(pk=2, name='b')
ChoiceModel.objects.create(pk=3, name='c')
# ModelChoiceField
e = {
'required': 'REQUIRED',
'invalid_choice': 'INVALID CHOICE',
}
f = ModelChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['INVALID CHOICE'], f.clean, '4')
# ModelMultipleChoiceField
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
'list': 'NOT A LIST OF VALUES',
}
f = ModelMultipleChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors(['REQUIRED'], f.clean, '')
self.assertFormErrors(['NOT A LIST OF VALUES'], f.clean, '3')
self.assertFormErrors(['4 IS INVALID CHOICE'], f.clean, ['4'])
| bsd-3-clause |
mwrightevent38/MissionPlanner | Lib/site-packages/numpy/core/getlimits.py | 54 | 8734 | """ Machine limits for Float32 and Float64 and (long double) if available...
"""
__all__ = ['finfo','iinfo']
from machar import MachAr
import numeric
import numerictypes as ntypes
from numeric import array
def _frz(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0: a.shape = (1,)
return a
_convert_to_float = {
ntypes.csingle: ntypes.single,
ntypes.complex_: ntypes.float_,
ntypes.clongfloat: ntypes.longfloat
}
class finfo(object):
"""
finfo(dtype)
Machine limits for floating point types.
Attributes
----------
eps : floating point number of the appropriate type
The smallest representable number such that ``1.0 + eps != 1.0``.
epsneg : floating point number of the appropriate type
The smallest representable number such that ``1.0 - epsneg != 1.0``.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
machar : MachAr
The object which calculated these parameters and holds more detailed
information.
machep : int
The exponent that yields ``eps``.
max : floating point number of the appropriate type
The largest representable number.
maxexp : int
The smallest positive power of the base (2) that causes overflow.
min : floating point number of the appropriate type
The smallest representable number, typically ``-max``.
minexp : int
The most negative power of the base (2) consistent with there being
no leading 0's in the mantissa.
negep : int
The exponent that yields ``epsneg``.
nexp : int
The number of bits in the exponent including its sign and bias.
nmant : int
The number of bits in the mantissa.
precision : int
The approximate number of decimal digits to which this kind of float
is precise.
resolution : floating point number of the appropriate type
The approximate decimal resolution of this type, i.e.
``10**-precision``.
tiny : floating point number of the appropriate type
The smallest-magnitude usable number.
Parameters
----------
dtype : floating point type, dtype, or instance
The kind of floating point data type to get information about.
See Also
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
Notes
-----
For developers of NumPy: do not instantiate this at the module level. The
initial calculation of these parameters is expensive and negatively impacts
import times. These objects are cached, so calling ``finfo()`` repeatedly
inside your functions is not a problem.
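Examples
--------
A brief usage sketch (the printed values are typical for IEEE-754 single
and double precision; the exact digits may vary by platform):

>>> finfo(ntypes.single).eps # doctest: +SKIP
1.1920929e-07
>>> finfo(ntypes.double).eps # doctest: +SKIP
2.2204460492503131e-16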
"""
_finfo_cache = {}
def __new__(cls, dtype):
try:
dtype = numeric.dtype(dtype)
except TypeError:
# In case a float instance was given
dtype = numeric.dtype(type(dtype))
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
dtypes = [dtype]
newdtype = numeric.obj2sctype(dtype)
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
if not issubclass(dtype, numeric.inexact):
raise ValueError, "data type %r not inexact" % (dtype)
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
if not issubclass(dtype, numeric.floating):
newdtype = _convert_to_float[dtype]
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
obj = object.__new__(cls)._init(dtype)
for dt in dtypes:
cls._finfo_cache[dt] = obj
return obj
def _init(self, dtype):
self.dtype = numeric.dtype(dtype)
if dtype is ntypes.double:
itype = ntypes.int64
fmt = '%24.16e'
precname = 'double'
elif dtype is ntypes.single:
itype = ntypes.int32
fmt = '%15.7e'
precname = 'single'
elif dtype is ntypes.longdouble:
itype = ntypes.longlong
fmt = '%s'
precname = 'long double'
else:
raise ValueError, repr(dtype)
machar = MachAr(lambda v:array([v], dtype),
lambda v:_frz(v.astype(itype))[0],
lambda v:array(_frz(v)[0], dtype),
lambda v: fmt % array(_frz(v)[0], dtype),
'numpy %s precision floating point number' % precname)
for word in ['precision', 'iexp',
'maxexp','minexp','negep',
'machep']:
setattr(self,word,getattr(machar, word))
for word in ['tiny','resolution','epsneg']:
setattr(self,word,getattr(machar, word).flat[0])
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
self.nexp = machar.iexp
self.nmant = machar.it
self.machar = machar
self._str_tiny = machar._str_xmin.strip()
self._str_max = machar._str_xmax.strip()
self._str_epsneg = machar._str_epsneg.strip()
self._str_eps = machar._str_eps.strip()
self._str_resolution = machar._str_resolution.strip()
return self
def __str__(self):
return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
precision=%(precision)3s resolution= %(_str_resolution)s
machep=%(machep)6s eps= %(_str_eps)s
negep =%(negep)6s epsneg= %(_str_epsneg)s
minexp=%(minexp)6s tiny= %(_str_tiny)s
maxexp=%(maxexp)6s max= %(_str_max)s
nexp =%(nexp)6s min= -max
---------------------------------------------------------------------
''' % self.__dict__
class iinfo:
"""
iinfo(type)
Machine limits for integer types.
Attributes
----------
min : int
The smallest integer expressible by the type.
max : int
The largest integer expressible by the type.
Parameters
----------
type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
--------
finfo : The equivalent for floating point data types.
Examples
--------
With types:
>>> ii16 = np.iinfo(np.int16)
>>> ii16.min
-32768
>>> ii16.max
32767
>>> ii32 = np.iinfo(np.int32)
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
With instances:
>>> ii32 = np.iinfo(np.int32(10))
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
"""
_min_vals = {}
_max_vals = {}
def __init__(self, int_type):
try:
self.dtype = numeric.dtype(int_type)
except TypeError:
self.dtype = numeric.dtype(type(int_type))
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
if not self.kind in 'iu':
raise ValueError("Invalid integer data type.")
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
return 0
else:
try:
val = iinfo._min_vals[self.key]
except KeyError:
val = int(-(1L << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
min = property(min)
def max(self):
"""Maximum value of given dtype."""
try:
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
val = int((1L << self.bits) - 1)
else:
val = int((1L << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
max = property(max)
def __str__(self):
"""String representation."""
return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
min = %(min)s
max = %(max)s
---------------------------------------------------------------------
''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
if __name__ == '__main__':
f = finfo(ntypes.single)
print 'single epsilon:',f.eps
print 'single tiny:',f.tiny
f = finfo(ntypes.float)
print 'float epsilon:',f.eps
print 'float tiny:',f.tiny
f = finfo(ntypes.longfloat)
print 'longfloat epsilon:',f.eps
print 'longfloat tiny:',f.tiny
| gpl-3.0 |
MakeHer/edx-platform | common/lib/xmodule/xmodule/modulestore/modulestore_settings.py | 197 | 5460 | """
This file contains helper functions for configuring module store settings, with support for backward compatibility with older formats.
"""
import warnings
import copy
def convert_module_store_setting_if_needed(module_store_setting):
"""
Converts old-style module_store_setting configuration settings to the new format.
"""
def convert_old_stores_into_list(old_stores):
"""
Converts and returns the given stores in old (unordered) dict-style format to the new (ordered) list format
"""
new_store_list = []
for store_name, store_settings in old_stores.iteritems():
store_settings['NAME'] = store_name
if store_name == 'default':
new_store_list.insert(0, store_settings)
else:
new_store_list.append(store_settings)
# migrate request for the old 'direct' Mongo store to the Draft store
if store_settings['ENGINE'] == 'xmodule.modulestore.mongo.MongoModuleStore':
warnings.warn("MongoModuleStore is deprecated! Please use DraftModuleStore.", DeprecationWarning)
store_settings['ENGINE'] = 'xmodule.modulestore.mongo.draft.DraftModuleStore'
return new_store_list
if module_store_setting is None:
return None
# Convert to Mixed, if needed
if module_store_setting['default']['ENGINE'] != 'xmodule.modulestore.mixed.MixedModuleStore':
warnings.warn("Direct access to a modulestore is deprecated. Please use MixedModuleStore.", DeprecationWarning)
# convert to using mixed module_store
new_module_store_setting = {
"default": {
"ENGINE": "xmodule.modulestore.mixed.MixedModuleStore",
"OPTIONS": {
"mappings": {},
"stores": []
}
}
}
# copy the old configurations into the new settings
new_module_store_setting['default']['OPTIONS']['stores'] = convert_old_stores_into_list(
module_store_setting
)
module_store_setting = new_module_store_setting
# Convert from dict, if needed
elif isinstance(get_mixed_stores(module_store_setting), dict):
warnings.warn(
"Using a dict for the Stores option in the MixedModuleStore is deprecated. Please use a list instead.",
DeprecationWarning
)
# convert old-style (unordered) dict to (an ordered) list
module_store_setting['default']['OPTIONS']['stores'] = convert_old_stores_into_list(
get_mixed_stores(module_store_setting)
)
assert isinstance(get_mixed_stores(module_store_setting), list)
# Add Split, if needed
# If Split is not defined but the DraftMongoModuleStore is configured, add Split as a copy of Draft
mixed_stores = get_mixed_stores(module_store_setting)
is_split_defined = any((store['ENGINE'].endswith('.DraftVersioningModuleStore')) for store in mixed_stores)
if not is_split_defined:
# find first setting of mongo store
mongo_store = next(
(store for store in mixed_stores if (
store['ENGINE'].endswith('.DraftMongoModuleStore') or store['ENGINE'].endswith('.DraftModuleStore')
)),
None
)
if mongo_store:
# deepcopy mongo -> split
split_store = copy.deepcopy(mongo_store)
# update the ENGINE and NAME fields
split_store['ENGINE'] = 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore'
split_store['NAME'] = 'split'
# add split to the end of the list
mixed_stores.append(split_store)
return module_store_setting
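# A hedged example of the conversion above (the setting literal is invented
# purely for illustration):
#
#   old_setting = {
#       'default': {
#           'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
#           'OPTIONS': {}, 'DOC_STORE_CONFIG': {},
#       }
#   }
#   new_setting = convert_module_store_setting_if_needed(old_setting)
#   # new_setting['default']['ENGINE'] is now the MixedModuleStore; its
#   # OPTIONS['stores'] is an ordered list whose first entry is the old
#   # 'default' store with its engine rewritten to DraftModuleStore, and a
#   # 'split' store is appended as a copy of that draft store.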
def update_module_store_settings(
module_store_setting,
doc_store_settings=None,
module_store_options=None,
xml_store_options=None,
default_store=None,
mappings=None,
):
"""
Updates the settings for each store defined in the given module_store_setting settings
with the given doc store configuration and options, overwriting existing keys.
If default_store is specified, the given default store is moved to the top of the
list of stores.
"""
for store in module_store_setting['default']['OPTIONS']['stores']:
if store['NAME'] == 'xml':
xml_store_options and store['OPTIONS'].update(xml_store_options)
else:
module_store_options and store['OPTIONS'].update(module_store_options)
doc_store_settings and store['DOC_STORE_CONFIG'].update(doc_store_settings)
if default_store:
mixed_stores = get_mixed_stores(module_store_setting)
for store in mixed_stores:
if store['NAME'] == default_store:
# move the found store to the top of the list
mixed_stores.remove(store)
mixed_stores.insert(0, store)
return
raise Exception("Could not find setting for requested default store: {}".format(default_store))
if mappings and 'mappings' in module_store_setting['default']['OPTIONS']:
module_store_setting['default']['OPTIONS']['mappings'] = mappings
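# Hedged usage sketch (the keys and store name are illustrative, not taken
# from any real deployment configuration):
#
#   update_module_store_settings(
#       MODULESTORE_SETTING,
#       doc_store_settings={'host': 'localhost', 'db': 'edxapp'},
#       default_store='split',
#   )
#   # every non-xml store picks up the doc store config, and the 'split'
#   # store moves to the front of the stores list so it becomes the default.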
def get_mixed_stores(mixed_setting):
"""
Helper for accessing stores in a configuration setting for the Mixed modulestore.
"""
return mixed_setting["default"]["OPTIONS"]["stores"]
| agpl-3.0 |
TangHao1987/intellij-community | python/helpers/profiler/profilerpy3/ttypes.py | 45 | 19680 | #
# Autogenerated by Thrift Compiler (1.0.0-dev)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thriftpy3.Thrift import TType, TMessageType, TException, TApplicationException
from thriftpy3.transport import TTransport
from thriftpy3.protocol import TBinaryProtocol, TProtocol
try:
from thriftpy3.protocol import fastbinary
except:
fastbinary = None
class FuncStat:
"""
Attributes:
- file
- line
- func_name
- calls_count
- total_time
- own_time
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'file', None, None, ), # 1
(2, TType.I32, 'line', None, None, ), # 2
(3, TType.STRING, 'func_name', None, None, ), # 3
(4, TType.I32, 'calls_count', None, None, ), # 4
(5, TType.DOUBLE, 'total_time', None, None, ), # 5
(6, TType.DOUBLE, 'own_time', None, None, ), # 6
)
def __init__(self, file=None, line=None, func_name=None, calls_count=None, total_time=None, own_time=None,):
self.file = file
self.line = line
self.func_name = func_name
self.calls_count = calls_count
self.total_time = total_time
self.own_time = own_time
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.file = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.line = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.func_name = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.calls_count = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.DOUBLE:
self.total_time = iprot.readDouble()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.DOUBLE:
self.own_time = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('FuncStat')
if self.file is not None:
oprot.writeFieldBegin('file', TType.STRING, 1)
oprot.writeString(self.file)
oprot.writeFieldEnd()
if self.line is not None:
oprot.writeFieldBegin('line', TType.I32, 2)
oprot.writeI32(self.line)
oprot.writeFieldEnd()
if self.func_name is not None:
oprot.writeFieldBegin('func_name', TType.STRING, 3)
oprot.writeString(self.func_name)
oprot.writeFieldEnd()
if self.calls_count is not None:
oprot.writeFieldBegin('calls_count', TType.I32, 4)
oprot.writeI32(self.calls_count)
oprot.writeFieldEnd()
if self.total_time is not None:
oprot.writeFieldBegin('total_time', TType.DOUBLE, 5)
oprot.writeDouble(self.total_time)
oprot.writeFieldEnd()
if self.own_time is not None:
oprot.writeFieldBegin('own_time', TType.DOUBLE, 6)
oprot.writeDouble(self.own_time)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.file is None:
raise TProtocol.TProtocolException(message='Required field file is unset!')
if self.func_name is None:
raise TProtocol.TProtocolException(message='Required field func_name is unset!')
if self.calls_count is None:
raise TProtocol.TProtocolException(message='Required field calls_count is unset!')
if self.total_time is None:
raise TProtocol.TProtocolException(message='Required field total_time is unset!')
if self.own_time is None:
raise TProtocol.TProtocolException(message='Required field own_time is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.file)
value = (value * 31) ^ hash(self.line)
value = (value * 31) ^ hash(self.func_name)
value = (value * 31) ^ hash(self.calls_count)
value = (value * 31) ^ hash(self.total_time)
value = (value * 31) ^ hash(self.own_time)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Function:
"""
Attributes:
- func_stat
- callers
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'func_stat', (FuncStat, FuncStat.thrift_spec), None, ), # 1
(2, TType.LIST, 'callers', (TType.STRUCT,(FuncStat, FuncStat.thrift_spec)), None, ), # 2
)
def __init__(self, func_stat=None, callers=None,):
self.func_stat = func_stat
self.callers = callers
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.func_stat = FuncStat()
self.func_stat.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.callers = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in range(_size0):
_elem5 = FuncStat()
_elem5.read(iprot)
self.callers.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Function')
if self.func_stat is not None:
oprot.writeFieldBegin('func_stat', TType.STRUCT, 1)
self.func_stat.write(oprot)
oprot.writeFieldEnd()
if self.callers is not None:
oprot.writeFieldBegin('callers', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.callers))
for iter6 in self.callers:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.func_stat is None:
raise TProtocol.TProtocolException(message='Required field func_stat is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.func_stat)
value = (value * 31) ^ hash(self.callers)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Stats:
"""
Attributes:
- func_stats
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'func_stats', (TType.STRUCT,(Function, Function.thrift_spec)), None, ), # 1
)
def __init__(self, func_stats=None,):
self.func_stats = func_stats
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.func_stats = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in range(_size7):
_elem12 = Function()
_elem12.read(iprot)
self.func_stats.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Stats')
if self.func_stats is not None:
oprot.writeFieldBegin('func_stats', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.func_stats))
for iter13 in self.func_stats:
iter13.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.func_stats is None:
raise TProtocol.TProtocolException(message='Required field func_stats is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.func_stats)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Stats_Req:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Stats_Req')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SaveSnapshot_Req:
"""
Attributes:
- filepath
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'filepath', None, None, ), # 1
)
def __init__(self, filepath=None,):
self.filepath = filepath
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.filepath = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SaveSnapshot_Req')
if self.filepath is not None:
oprot.writeFieldBegin('filepath', TType.STRING, 1)
oprot.writeString(self.filepath)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.filepath is None:
raise TProtocol.TProtocolException(message='Required field filepath is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.filepath)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ProfilerRequest:
"""
Attributes:
- id
- ystats
- save_snapshot
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'id', None, None, ), # 1
(2, TType.STRUCT, 'ystats', (Stats_Req, Stats_Req.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'save_snapshot', (SaveSnapshot_Req, SaveSnapshot_Req.thrift_spec), None, ), # 3
)
def __init__(self, id=None, ystats=None, save_snapshot=None,):
self.id = id
self.ystats = ystats
self.save_snapshot = save_snapshot
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ystats = Stats_Req()
self.ystats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.save_snapshot = SaveSnapshot_Req()
self.save_snapshot.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ProfilerRequest')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I32, 1)
oprot.writeI32(self.id)
oprot.writeFieldEnd()
if self.ystats is not None:
oprot.writeFieldBegin('ystats', TType.STRUCT, 2)
self.ystats.write(oprot)
oprot.writeFieldEnd()
if self.save_snapshot is not None:
oprot.writeFieldBegin('save_snapshot', TType.STRUCT, 3)
self.save_snapshot.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.id is None:
raise TProtocol.TProtocolException(message='Required field id is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.ystats)
value = (value * 31) ^ hash(self.save_snapshot)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ProfilerResponse:
"""
Attributes:
- id
- ystats
- snapshot_filepath
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'id', None, None, ), # 1
(2, TType.STRUCT, 'ystats', (Stats, Stats.thrift_spec), None, ), # 2
(3, TType.STRING, 'snapshot_filepath', None, None, ), # 3
)
def __init__(self, id=None, ystats=None, snapshot_filepath=None,):
self.id = id
self.ystats = ystats
self.snapshot_filepath = snapshot_filepath
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ystats = Stats()
self.ystats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.snapshot_filepath = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ProfilerResponse')
if self.id is not None:
oprot.writeFieldBegin('id', TType.I32, 1)
oprot.writeI32(self.id)
oprot.writeFieldEnd()
if self.ystats is not None:
oprot.writeFieldBegin('ystats', TType.STRUCT, 2)
self.ystats.write(oprot)
oprot.writeFieldEnd()
if self.snapshot_filepath is not None:
oprot.writeFieldBegin('snapshot_filepath', TType.STRING, 3)
oprot.writeString(self.snapshot_filepath)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.id is None:
raise TProtocol.TProtocolException(message='Required field id is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.ystats)
value = (value * 31) ^ hash(self.snapshot_filepath)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| apache-2.0 |
vipul-sharma20/oh-mainline | vendor/packages/twisted/twisted/test/test_compat.py | 18 | 6186 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.compat}.
"""
import types, socket
from twisted.trial import unittest
from twisted.python.compat import set, frozenset, reduce
class IterableCounter:
def __init__(self, lim=0):
self.lim = lim
self.i = -1
def __iter__(self):
return self
def next(self):
self.i += 1
if self.i >= self.lim:
raise StopIteration
return self.i
class CompatTestCase(unittest.TestCase):
def testDict(self):
d1 = {'a': 'b'}
d2 = dict(d1)
self.assertEquals(d1, d2)
d1['a'] = 'c'
self.assertNotEquals(d1, d2)
d2 = dict(d1.items())
self.assertEquals(d1, d2)
def testBool(self):
self.assertEquals(bool('hi'), True)
self.assertEquals(bool(True), True)
self.assertEquals(bool(''), False)
self.assertEquals(bool(False), False)
def testIteration(self):
lst1, lst2 = range(10), []
for i in iter(lst1):
lst2.append(i)
self.assertEquals(lst1, lst2)
del lst2[:]
try:
iterable = iter(lst1)
while 1:
lst2.append(iterable.next())
except StopIteration:
pass
self.assertEquals(lst1, lst2)
del lst2[:]
for i in iter(IterableCounter(10)):
lst2.append(i)
self.assertEquals(lst1, lst2)
del lst2[:]
try:
iterable = iter(IterableCounter(10))
while 1:
lst2.append(iterable.next())
except StopIteration:
pass
self.assertEquals(lst1, lst2)
del lst2[:]
for i in iter(IterableCounter(20).next, 10):
lst2.append(i)
self.assertEquals(lst1, lst2)
def testIsinstance(self):
self.assert_(isinstance(u'hi', types.StringTypes))
self.assert_(isinstance(self, unittest.TestCase))
# I'm pretty sure it's impossible to implement this
# without replacing isinstance on 2.2 as well :(
# self.assert_(isinstance({}, dict))
def testStrip(self):
self.assertEquals(' x '.lstrip(' '), 'x ')
self.assertEquals(' x x'.lstrip(' '), 'x x')
self.assertEquals(' x '.rstrip(' '), ' x')
self.assertEquals('x x '.rstrip(' '), 'x x')
self.assertEquals('\t x '.lstrip('\t '), 'x ')
self.assertEquals(' \tx x'.lstrip('\t '), 'x x')
self.assertEquals(' x\t '.rstrip(' \t'), ' x')
self.assertEquals('x x \t'.rstrip(' \t'), 'x x')
self.assertEquals('\t x '.strip('\t '), 'x')
self.assertEquals(' \tx x'.strip('\t '), 'x x')
self.assertEquals(' x\t '.strip(' \t'), 'x')
self.assertEquals('x x \t'.strip(' \t'), 'x x')
def testNToP(self):
from twisted.python.compat import inet_ntop
f = lambda a: inet_ntop(socket.AF_INET6, a)
g = lambda a: inet_ntop(socket.AF_INET, a)
self.assertEquals('::', f('\x00' * 16))
self.assertEquals('::1', f('\x00' * 15 + '\x01'))
self.assertEquals(
'aef:b01:506:1001:ffff:9997:55:170',
f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70'))
self.assertEquals('1.0.1.0', g('\x01\x00\x01\x00'))
self.assertEquals('170.85.170.85', g('\xaa\x55\xaa\x55'))
self.assertEquals('255.255.255.255', g('\xff\xff\xff\xff'))
self.assertEquals('100::', f('\x01' + '\x00' * 15))
self.assertEquals('100::1', f('\x01' + '\x00' * 14 + '\x01'))
def testPToN(self):
from twisted.python.compat import inet_pton
f = lambda a: inet_pton(socket.AF_INET6, a)
g = lambda a: inet_pton(socket.AF_INET, a)
self.assertEquals('\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEquals('\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEquals('\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEquals('\x00' * 16, f('::'))
self.assertEquals('\x00' * 16, f('0::0'))
self.assertEquals('\x00\x01' + '\x00' * 14, f('1::'))
self.assertEquals(
'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae'))
self.assertEquals('\x00' * 14 + '\x00\x01', f('::1'))
self.assertEquals('\x00' * 12 + '\x01\x02\x03\x04', f('::1.2.3.4'))
self.assertEquals(
'\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x01\x02\x03\xff',
f('1:2:3:4:5:6:1.2.3.255'))
for badaddr in ['1:2:3:4:5:6:7:8:', ':1:2:3:4:5:6:7:8', '1::2::3',
'1:::3', ':::', '1:2', '::1.2', '1.2.3.4::',
'abcd:1.2.3.4:abcd:abcd:abcd:abcd:abcd',
'1234:1.2.3.4:1234:1234:1234:1234:1234:1234',
'1.2.3.4']:
self.assertRaises(ValueError, f, badaddr)
def test_set(self):
"""
L{set} should behave like the expected set interface.
"""
a = set()
a.add('b')
a.add('c')
a.add('a')
b = list(a)
b.sort()
self.assertEquals(b, ['a', 'b', 'c'])
a.remove('b')
b = list(a)
b.sort()
self.assertEquals(b, ['a', 'c'])
a.discard('d')
b = set(['r', 's'])
d = a.union(b)
b = list(d)
b.sort()
self.assertEquals(b, ['a', 'c', 'r', 's'])
def test_frozenset(self):
"""
L{frozenset} should behave like the expected frozenset interface.
"""
a = frozenset(['a', 'b'])
self.assertRaises(AttributeError, getattr, a, "add")
self.assertEquals(list(a), ['a', 'b'])
b = frozenset(['r', 's'])
d = a.union(b)
b = list(d)
b.sort()
self.assertEquals(b, ['a', 'b', 'r', 's'])
def test_reduce(self):
"""
L{reduce} should behave like the builtin reduce.
"""
self.assertEquals(15, reduce(lambda x, y: x + y, [1, 2, 3, 4, 5]))
self.assertEquals(16, reduce(lambda x, y: x + y, [1, 2, 3, 4, 5], 1))
| agpl-3.0 |
VirtueSecurity/aws-extender | BappModules/boto/kinesis/layer1.py | 33 | 41004 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
from boto.compat import six
class KinesisConnection(AWSQueryConnection):
"""
Amazon Kinesis Service API Reference
Amazon Kinesis is a managed service that scales elastically for
real time processing of streaming big data.
"""
APIVersion = "2013-12-02"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
ServiceName = "Kinesis"
TargetPrefix = "Kinesis_20131202"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ExpiredIteratorException": exceptions.ExpiredIteratorException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidArgumentException": exceptions.InvalidArgumentException,
"SubscriptionRequiredException": exceptions.SubscriptionRequiredException
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(KinesisConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_tags_to_stream(self, stream_name, tags):
"""
Adds or updates tags for the specified Amazon Kinesis stream.
Each stream can have up to 10 tags.
If tags have already been assigned to the stream,
`AddTagsToStream` overwrites any existing tags that correspond
to the specified tag keys.
:type stream_name: string
:param stream_name: The name of the stream.
:type tags: map
:param tags: The set of key-value pairs to use to create the tags.
"""
params = {'StreamName': stream_name, 'Tags': tags, }
return self.make_request(action='AddTagsToStream',
body=json.dumps(params))
def create_stream(self, stream_name, shard_count):
"""
Creates an Amazon Kinesis stream. A stream captures and
transports data records that are continuously emitted from
different data sources or producers. Scale-out within an
Amazon Kinesis stream is explicitly supported by means of
shards, which are uniquely identified groups of data records
in an Amazon Kinesis stream.
You specify and control the number of shards that a stream is
composed of. Each open shard can support up to 5 read
transactions per second, up to a maximum total of 2 MB of data
read per second. Each shard can support up to 1000 records
written per second, up to a maximum total of 1 MB data written
per second. You can add shards to a stream if the amount of
data input increases and you can remove shards if the amount
of data input decreases.
The stream name identifies the stream. The name is scoped to
the AWS account used by the application. It is also scoped by
region. That is, two streams in two different accounts can
have the same name, and two streams in the same account, but
in two different regions, can have the same name.
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Amazon Kinesis immediately returns and
sets the stream status to `CREATING`. After the stream is
created, Amazon Kinesis sets the stream status to `ACTIVE`.
You should perform read and write operations only on an
`ACTIVE` stream.
You receive a `LimitExceededException` when making a
`CreateStream` request if you try to do one of the following:
+ Have more than five streams in the `CREATING` state at any
point in time.
+ Create more shards than are authorized for your account.
The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards,
`contact AWS Support`_ to increase the limit on your account.
You can use `DescribeStream` to check the stream status, which
is returned in `StreamStatus`.
`CreateStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: A name to identify the stream. The stream name is
scoped to the AWS account used by the application that creates the
stream. It is also scoped by region. That is, two streams in two
different AWS accounts can have the same name, and two streams in
the same AWS account, but in two different regions, can have the
same name.
:type shard_count: integer
:param shard_count: The number of shards that the stream will use. The
throughput of the stream is a function of the number of shards;
more shards are required for greater provisioned throughput.
**Note:** The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards, `contact
AWS Support`_ to increase the limit on your account.
"""
params = {
'StreamName': stream_name,
'ShardCount': shard_count,
}
return self.make_request(action='CreateStream',
body=json.dumps(params))
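# Illustrative sketch (not part of boto): create a stream, then poll
# DescribeStream until StreamStatus reaches ACTIVE before reading or
# writing, as the docstring above advises. The stream name and polling
# interval are hypothetical.
#
#   import time
#   conn = KinesisConnection()
#   conn.create_stream('example-stream', 2)
#   while True:
#       desc = conn.describe_stream('example-stream')
#       if desc['StreamDescription']['StreamStatus'] == 'ACTIVE':
#           break
#       time.sleep(5)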
def delete_stream(self, stream_name):
"""
Deletes a stream and all its shards and data. You must shut
down any applications that are operating on the stream before
you delete the stream. If an application attempts to operate
on a deleted stream, it will receive the exception
`ResourceNotFoundException`.
If the stream is in the `ACTIVE` state, you can delete it.
After a `DeleteStream` request, the specified stream is in the
`DELETING` state until Amazon Kinesis completes the deletion.
**Note:** Amazon Kinesis might continue to accept data read
and write operations, such as PutRecord, PutRecords, and
GetRecords, on a stream in the `DELETING` state until the
stream deletion is complete.
When you delete a stream, any shards in that stream are also
deleted, and any tags are dissociated from the stream.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`DeleteStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to delete.
"""
params = {'StreamName': stream_name, }
return self.make_request(action='DeleteStream',
body=json.dumps(params))
def describe_stream(self, stream_name, limit=None,
exclusive_start_shard_id=None):
"""
Describes the specified stream.
The information about the stream includes its current status,
its Amazon Resource Name (ARN), and an array of shard objects.
For each shard object, there is information about the hash key
and sequence number ranges that the shard spans, and the IDs
of any earlier shards that played a role in creating the
shard. A sequence number is the identifier associated with
every record ingested in the Amazon Kinesis stream. The
sequence number is assigned when a record is put into the
stream.
You can limit the number of returned shards using the `Limit`
parameter. The number of shards in a stream may be too large
to return from a single call to `DescribeStream`. You can
detect this by using the `HasMoreShards` flag in the returned
output. `HasMoreShards` is set to `True` when there is more
data available.
`DescribeStream` is a paginated operation. If there are more
shards available, you can request them using the shard ID of
the last shard returned. Specify this ID in the
`ExclusiveStartShardId` parameter in a subsequent request to
`DescribeStream`.
`DescribeStream` has a limit of 10 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to describe.
:type limit: integer
:param limit: The maximum number of shards to return.
:type exclusive_start_shard_id: string
:param exclusive_start_shard_id: The shard ID of the shard to start
with.
"""
params = {'StreamName': stream_name, }
if limit is not None:
params['Limit'] = limit
if exclusive_start_shard_id is not None:
params['ExclusiveStartShardId'] = exclusive_start_shard_id
return self.make_request(action='DescribeStream',
body=json.dumps(params))
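# Illustrative sketch (not part of boto): gather every shard of a stream
# by following the HasMoreShards flag and passing the last shard ID as
# ExclusiveStartShardId, as described above. The stream name is
# hypothetical.
#
#   shards = []
#   start_id = None
#   while True:
#       desc = conn.describe_stream('example-stream',
#                                   exclusive_start_shard_id=start_id)
#       shards.extend(desc['StreamDescription']['Shards'])
#       if not desc['StreamDescription']['HasMoreShards']:
#           break
#       start_id = shards[-1]['ShardId']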
def get_records(self, shard_iterator, limit=None, b64_decode=True):
"""
Gets data records from a shard.
Specify a shard iterator using the `ShardIterator` parameter.
The shard iterator specifies the position in the shard from
which you want to start reading data records sequentially. If
there are no records available in the portion of the shard
that the iterator points to, `GetRecords` returns an empty
list. Note that it might take multiple calls to get to a
portion of the shard that contains records.
You can scale by provisioning multiple shards. Your
application should have one thread per shard, each reading
continuously from its shard. To read from a stream
continually, call `GetRecords` in a loop. Use GetShardIterator
to get the shard iterator to specify in the first `GetRecords`
call. `GetRecords` returns a new shard iterator in
`NextShardIterator`. Specify the shard iterator returned in
`NextShardIterator` in subsequent calls to `GetRecords`. Note
that if the shard has been closed, the shard iterator can't
return more data and `GetRecords` returns `null` in
`NextShardIterator`. You can terminate the loop when the shard
is closed, or when the shard iterator reaches the record with
the sequence number or other attribute that marks it as the
last record to process.
Each data record can be up to 50 KB in size, and each shard
can read up to 2 MB per second. You can ensure that your calls
don't exceed the maximum supported size or throughput by using
the `Limit` parameter to specify the maximum number of records
that `GetRecords` can return. Consider your average record
size when determining this limit. For example, if your average
record size is 40 KB, you can limit the data returned to about
1 MB per call by specifying 25 as the limit.
The size of the data returned by `GetRecords` will vary
depending on the utilization of the shard. The maximum size of
data that `GetRecords` can return is 10 MB. If a call returns
10 MB of data, subsequent calls made within the next 5 seconds
throw `ProvisionedThroughputExceededException`. If there is
insufficient provisioned throughput on the shard, subsequent
calls made within the next 1 second throw
`ProvisionedThroughputExceededException`. Note that
`GetRecords` won't return any data when it throws an
exception. For this reason, we recommend that you wait one
second between calls to `GetRecords`; however, it's possible
that the application will get exceptions for longer than 1
second.
To detect whether the application is falling behind in
processing, add a timestamp to your records and note how long
it takes to process them. You can also monitor how much data
is in a stream using the CloudWatch metrics for write
operations ( `PutRecord` and `PutRecords`). For more
information, see `Monitoring Amazon Kinesis with Amazon
CloudWatch`_ in the Amazon Kinesis Developer Guide .
:type shard_iterator: string
:param shard_iterator: The position in the shard from which you want to
start sequentially reading data records. A shard iterator specifies
this position using the sequence number of a data record in the
shard.
:type limit: integer
:param limit: The maximum number of records to return. Specify a value
of up to 10,000. If you specify a value that is greater than
10,000, `GetRecords` throws `InvalidArgumentException`.
:type b64_decode: boolean
:param b64_decode: Decode the Base64-encoded ``Data`` field of records.
"""
params = {'ShardIterator': shard_iterator, }
if limit is not None:
params['Limit'] = limit
response = self.make_request(action='GetRecords',
body=json.dumps(params))
# Base64 decode the data
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(
record['Data'].encode('utf-8')).decode('utf-8')
return response
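# Illustrative sketch (not part of boto): the read loop described above —
# seed it with GetShardIterator, then follow NextShardIterator until the
# shard is closed, pausing between calls as recommended. The stream name,
# shard ID, and handle() callback are hypothetical.
#
#   import time
#   it = conn.get_shard_iterator('example-stream', 'shardId-000000000000',
#                                'TRIM_HORIZON')['ShardIterator']
#   while it is not None:
#       out = conn.get_records(it, limit=25)
#       for record in out['Records']:
#           handle(record['Data'])
#       it = out.get('NextShardIterator')
#       time.sleep(1)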
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
starting_sequence_number=None):
"""
Gets a shard iterator. A shard iterator expires five minutes
after it is returned to the requester.
A shard iterator specifies the position in the shard from
which to start reading data records sequentially. A shard
iterator specifies this position using the sequence number of
a data record in a shard. A sequence number is the identifier
associated with every record ingested in the Amazon Kinesis
stream. The sequence number is assigned when a record is put
into the stream.
You must specify the shard iterator type. For example, you can
set the `ShardIteratorType` parameter to read exactly from the
position denoted by a specific sequence number by using the
`AT_SEQUENCE_NUMBER` shard iterator type, or right after the
sequence number by using the `AFTER_SEQUENCE_NUMBER` shard
iterator type, using sequence numbers returned by earlier
calls to PutRecord, PutRecords, GetRecords, or DescribeStream.
You can specify the shard iterator type `TRIM_HORIZON` in the
request to cause `ShardIterator` to point to the last
untrimmed record in the shard in the system, which is the
oldest data record in the shard. Or you can point to just
after the most recent record in the shard, by using the shard
iterator type `LATEST`, so that you always read the most
recent data in the shard.
When you repeatedly read from an Amazon Kinesis stream, use a
GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned by the `GetRecords` request in
`NextShardIterator` for subsequent reads. A new shard iterator
is returned by every `GetRecords` request in
`NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you receive
a `ProvisionedThroughputExceededException`. For more
information about throughput limits, see GetRecords.
If the shard is closed, the iterator can't return more data,
and `GetShardIterator` returns `null` for its `ShardIterator`.
A shard can be closed using SplitShard or MergeShards.
`GetShardIterator` has a limit of 5 transactions per second
per account per open shard.
:type stream_name: string
:param stream_name: The name of the stream.
:type shard_id: string
:param shard_id: The shard ID of the shard to get the iterator for.
:type shard_iterator_type: string
:param shard_iterator_type:
Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid shard iterator types:
+ AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
by a specific sequence number.
+ AFTER_SEQUENCE_NUMBER - Start reading right after the position
denoted by a specific sequence number.
+ TRIM_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
+ LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
:type starting_sequence_number: string
:param starting_sequence_number: The sequence number of the data record
in the shard from which to start reading.
:returns: A dictionary containing:
1) a `ShardIterator` key whose value is the shard iterator string
"""
params = {
'StreamName': stream_name,
'ShardId': shard_id,
'ShardIteratorType': shard_iterator_type,
}
if starting_sequence_number is not None:
params['StartingSequenceNumber'] = starting_sequence_number
return self.make_request(action='GetShardIterator',
body=json.dumps(params))
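# Illustrative sketch (not part of boto): resume reading just past a
# checkpoint using the AFTER_SEQUENCE_NUMBER iterator type described
# above; checkpointed_seq is a hypothetical sequence number saved
# earlier by the application.
#
#   it = conn.get_shard_iterator(
#       'example-stream', 'shardId-000000000000',
#       'AFTER_SEQUENCE_NUMBER',
#       starting_sequence_number=checkpointed_seq)['ShardIterator']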
def list_streams(self, limit=None, exclusive_start_stream_name=None):
"""
Lists your streams.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
streams using the `Limit` parameter. If you do not specify a
value for the `Limit` parameter, Amazon Kinesis uses the
default limit, which is currently 10.
You can detect if there are more streams available to list by
using the `HasMoreStreams` flag from the returned output. If
there are more streams available, you can request more streams
by using the name of the last stream returned by the
`ListStreams` request in the `ExclusiveStartStreamName`
parameter in a subsequent request to `ListStreams`. The group
of stream names returned by the subsequent request is then
added to the list. You can continue this process until all the
stream names have been collected in the list.
`ListStreams` has a limit of 5 transactions per second per
account.
:type limit: integer
:param limit: The maximum number of streams to list.
:type exclusive_start_stream_name: string
:param exclusive_start_stream_name: The name of the stream to start the
list with.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if exclusive_start_stream_name is not None:
params['ExclusiveStartStreamName'] = exclusive_start_stream_name
return self.make_request(action='ListStreams',
body=json.dumps(params))
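# Illustrative sketch (not part of boto): collect all stream names by
# following HasMoreStreams and passing the last name returned as
# ExclusiveStartStreamName, as described above.
#
#   names = []
#   last = None
#   while True:
#       out = conn.list_streams(exclusive_start_stream_name=last)
#       names.extend(out['StreamNames'])
#       if not out['HasMoreStreams']:
#           break
#       last = names[-1]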
def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None,
limit=None):
"""
Lists the tags for the specified Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream.
:type exclusive_start_tag_key: string
:param exclusive_start_tag_key: The key to use as the starting point
for the list of tags. If this parameter is set, `ListTagsForStream`
gets all tags that occur after `ExclusiveStartTagKey`.
:type limit: integer
:param limit: The number of tags to return. If this number is less than
the total number of tags associated with the stream, `HasMoreTags`
is set to `True`. To list additional tags, set
`ExclusiveStartTagKey` to the last key in the response.
"""
params = {'StreamName': stream_name, }
if exclusive_start_tag_key is not None:
params['ExclusiveStartTagKey'] = exclusive_start_tag_key
if limit is not None:
params['Limit'] = limit
return self.make_request(action='ListTagsForStream',
body=json.dumps(params))
def merge_shards(self, stream_name, shard_to_merge,
adjacent_shard_to_merge):
"""
Merges two adjacent shards in a stream and combines them into
a single shard to reduce the stream's capacity to ingest and
transport data. Two shards are considered adjacent if the
union of the hash key ranges for the two shards forms a
contiguous set with no gaps. For example, if you have two
shards, one with a hash key range of 276...381 and the other
with a hash key range of 382...454, then you could merge these
two shards into a single shard that would have a hash key
range of 276...454. After the merge, the single child shard
receives data for all hash key values covered by the two
parent shards.
`MergeShards` is called when there is a need to reduce the
overall capacity of a stream because of excess capacity that
is not being used. You must specify the shard to be merged and
the adjacent shard for a stream. For more information about
merging shards, see `Merge Two Shards`_ in the Amazon Kinesis
Developer Guide .
If the stream is in the `ACTIVE` state, you can call
`MergeShards`. If a stream is in the `CREATING`, `UPDATING`,
or `DELETING` state, `MergeShards` returns a
`ResourceInUseException`. If the specified stream does not
exist, `MergeShards` returns a `ResourceNotFoundException`.
You can use DescribeStream to check the state of the stream,
which is returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a
`MergeShards` request, Amazon Kinesis immediately returns a
response and sets the `StreamStatus` to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the `StreamStatus`
to `ACTIVE`. Read and write operations continue to work while
the stream is in the `UPDATING` state.
You use DescribeStream to determine the shard IDs that are
specified in the `MergeShards` request.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, `MergeShards` or SplitShard, you
will receive a `LimitExceededException`.
`MergeShards` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the merge.
:type shard_to_merge: string
:param shard_to_merge: The shard ID of the shard to combine with the
adjacent shard for the merge.
:type adjacent_shard_to_merge: string
:param adjacent_shard_to_merge: The shard ID of the adjacent shard for
the merge.
"""
params = {
'StreamName': stream_name,
'ShardToMerge': shard_to_merge,
'AdjacentShardToMerge': adjacent_shard_to_merge,
}
return self.make_request(action='MergeShards',
body=json.dumps(params))
def put_record(self, stream_name, data, partition_key,
explicit_hash_key=None,
sequence_number_for_ordering=None,
exclusive_minimum_sequence_number=None,
b64_encode=True):
"""
This operation puts a data record into an Amazon Kinesis
stream from a producer. This operation must be called to send
data from the producer into the Amazon Kinesis stream for
real-time ingestion and subsequent processing. The `PutRecord`
operation requires the name of the stream that captures,
stores, and transports the data; a partition key; and the data
blob itself. The data blob could be a segment from a log file,
geographic/location data, website clickstream data, or any
other data type.
The partition key is used to distribute data across shards.
Amazon Kinesis segregates the data records that belong to a
data stream into multiple shards, using the partition key
associated with each data record to determine which shard a
given data record belongs to.
Partition keys are Unicode strings, with a maximum length
limit of 256 bytes. An MD5 hash function is used to map
partition keys to 128-bit integer values and to map associated
data records to shards using the hash key ranges of the
shards. You can override hashing the partition key to
determine the shard by explicitly specifying a hash value
using the `ExplicitHashKey` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
`PutRecord` returns the shard ID of where the data record was
placed and the sequence number that was assigned to the data
record.
Sequence numbers generally increase over time. To guarantee
strictly increasing ordering, use the
`SequenceNumberForOrdering` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
If a `PutRecord` request cannot be processed because of
insufficient provisioned throughput on the shard involved in
the request, `PutRecord` throws
`ProvisionedThroughputExceededException`.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream to put the data record into.
:type data: blob
:param data: The data blob to put into the record, which is
Base64-encoded when the blob is serialized.
The maximum size of the data blob (the payload after
Base64-decoding) is 50 kilobytes (KB).
Set `b64_encode` to ``False`` to disable automatic Base64 encoding.
:type partition_key: string
:param partition_key: Determines which shard in the stream the data
record is assigned to. Partition keys are Unicode strings with a
maximum length limit of 256 bytes. Amazon Kinesis uses the
partition key as input to a hash function that maps the partition
key and associated data to a specific shard. Specifically, an MD5
hash function is used to map partition keys to 128-bit integer
values and to map associated data records to shards. As a result of
this hashing mechanism, all data records with the same partition
key will map to the same shard within the stream.
:type explicit_hash_key: string
:param explicit_hash_key: The hash value used to explicitly determine
the shard the data record is assigned to by overriding the
partition key hash.
:type sequence_number_for_ordering: string
:param sequence_number_for_ordering: Guarantees strictly increasing
sequence numbers, for puts from the same client and to the same
partition key. Usage: set the `SequenceNumberForOrdering` of record
n to the sequence number of record n-1 (as returned in the
PutRecordResult when putting record n-1). If this parameter is not
set, records will be coarsely ordered based on arrival time.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {
'StreamName': stream_name,
'Data': data,
'PartitionKey': partition_key,
}
if explicit_hash_key is not None:
params['ExplicitHashKey'] = explicit_hash_key
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
if not isinstance(params['Data'], six.binary_type):
params['Data'] = params['Data'].encode('utf-8')
params['Data'] = base64.b64encode(params['Data']).decode('utf-8')
return self.make_request(action='PutRecord',
body=json.dumps(params))
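# Illustrative sketch (not part of boto): chain SequenceNumberForOrdering
# as the docstring above describes — pass record n-1's returned sequence
# number when putting record n to get strictly increasing ordering for
# one partition key. Stream and key names are hypothetical.
#
#   seq = None
#   for payload in ('first', 'second', 'third'):
#       result = conn.put_record('example-stream', payload, 'user-42',
#                                sequence_number_for_ordering=seq)
#       seq = result['SequenceNumber']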
def put_records(self, records, stream_name, b64_encode=True):
"""
Puts (writes) multiple data records from a producer into an
Amazon Kinesis stream in a single call (also referred to as a
`PutRecords` request). Use this operation to send data from a
data producer into the Amazon Kinesis stream for real-time
ingestion and processing. Each shard can support up to 1000
records written per second, up to a maximum total of 1 MB data
written per second.
You must specify the name of the stream that captures, stores,
and transports the data; and an array of request `Records`,
with each record in the array requiring a partition key and
data blob.
The data blob can be any type of data; for example, a segment
from a log file, geographic/location data, website clickstream
data, and so on.
The partition key is used by Amazon Kinesis as input to a hash
function that maps the partition key and associated data to a
specific shard. An MD5 hash function is used to map partition
keys to 128-bit integer values and to map associated data
records to shards. As a result of this hashing mechanism, all
data records with the same partition key map to the same shard
within the stream. For more information, see `Partition Key`_
in the Amazon Kinesis Developer Guide .
Each record in the `Records` array may include an optional
parameter, `ExplicitHashKey`, which overrides the partition
key to shard mapping. This parameter allows a data producer to
determine explicitly the shard where the record is stored. For
more information, see `Adding Multiple Records with
PutRecords`_ in the Amazon Kinesis Developer Guide .
The `PutRecords` response includes an array of response
`Records`. Each record in the response array directly
correlates with a record in the request array using natural
ordering, from the top to the bottom of the request and
response. The response `Records` array always includes the
same number of records as the request array.
The response `Records` array includes both successfully and
unsuccessfully processed records. Amazon Kinesis attempts to
process all records in each `PutRecords` request. A single
record failure does not stop the processing of subsequent
records.
A successfully processed record includes `ShardId` and
`SequenceNumber` values. The `ShardId` parameter identifies
the shard in the stream where the record is stored. The
`SequenceNumber` parameter is an identifier assigned to the
put record, unique to all records in the stream.
An unsuccessfully processed record includes `ErrorCode` and
`ErrorMessage` values. `ErrorCode` reflects the type of error
and can be one of the following values:
`ProvisionedThroughputExceededException` or `InternalFailure`.
`ErrorMessage` provides more detailed information about the
`ProvisionedThroughputExceededException` exception including
the account ID, stream name, and shard ID of the record that
was throttled.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type records: list
:param records: The records associated with the request.
:type stream_name: string
:param stream_name: The stream name associated with the request.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {'Records': records, 'StreamName': stream_name, }
if b64_encode:
for i in range(len(params['Records'])):
data = params['Records'][i]['Data']
if not isinstance(data, six.binary_type):
data = data.encode('utf-8')
params['Records'][i]['Data'] = base64.b64encode(
data).decode('utf-8')
return self.make_request(action='PutRecords',
body=json.dumps(params))
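# Illustrative sketch (not part of boto): batch put, then use the natural
# ordering of the response Records array (described above) to pair each
# request record with its result and collect per-record failures.
#
#   records = [{'Data': 'payload-%d' % i, 'PartitionKey': 'user-42'}
#              for i in range(10)]
#   out = conn.put_records(records, 'example-stream')
#   failed = [(req, res)
#             for req, res in zip(records, out['Records'])
#             if 'ErrorCode' in res]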
def remove_tags_from_stream(self, stream_name, tag_keys):
"""
Deletes tags from the specified Amazon Kinesis stream.
If you specify a tag that does not exist, it is ignored.
:type stream_name: string
:param stream_name: The name of the stream.
:type tag_keys: list
:param tag_keys: A list of tag keys. Each corresponding tag is removed
from the stream.
"""
params = {'StreamName': stream_name, 'TagKeys': tag_keys, }
return self.make_request(action='RemoveTagsFromStream',
body=json.dumps(params))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
Splits a shard into two new shards in the stream, to increase
the stream's capacity to ingest and transport data.
`SplitShard` is called when there is a need to increase the
overall capacity of a stream because of an expected increase in
the volume of data records being ingested.
You can also use `SplitShard` when a shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
`SplitShard` to increase stream capacity, so that more Amazon
Kinesis applications can simultaneously read data from the
stream for real-time processing.
You must specify the shard to be split and the new hash key,
which is the position in the shard where the shard gets split
in two. In many cases, the new hash key might simply be the
average of the beginning and ending hash key, but it can be
any hash key value in the range being mapped into the shard.
For more information about splitting shards, see `Split a
Shard`_ in the Amazon Kinesis Developer Guide .
You can use DescribeStream to determine the shard ID and hash
key values for the `ShardToSplit` and `NewStartingHashKey`
parameters that are specified in the `SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
response and sets the stream status to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the stream status
to `ACTIVE`. Read and write operations continue to work while
the stream is in the `UPDATING` state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
in the `ACTIVE` state, you can call `SplitShard`. If a stream
is in `CREATING` or `UPDATING` or `DELETING` states,
`DescribeStream` returns a `ResourceInUseException`.
If the specified stream does not exist, `DescribeStream`
returns a `ResourceNotFoundException`. If you try to create
more shards than are authorized for your account, you receive
a `LimitExceededException`.
The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards,
`contact AWS Support`_ to increase the limit on your account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
receive a `LimitExceededException`.
`SplitShard` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the shard split.
:type shard_to_split: string
:param shard_to_split: The shard ID of the shard to split.
:type new_starting_hash_key: string
:param new_starting_hash_key: A hash key value for the starting hash
key of one of the child shards created by the split. The hash key
range for a given shard constitutes a set of ordered contiguous
positive integers. The value for `NewStartingHashKey` must be in
the range of hash keys being mapped into the shard. The
`NewStartingHashKey` hash key value and all higher hash key values
in hash key range are distributed to one of the child shards. All
the lower hash key values in the range are distributed to the other
child shard.
"""
params = {
'StreamName': stream_name,
'ShardToSplit': shard_to_split,
'NewStartingHashKey': new_starting_hash_key,
}
return self.make_request(action='SplitShard',
body=json.dumps(params))
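# Illustrative sketch (not part of boto): split a shard at the midpoint
# of its hash key range — the "average of the beginning and ending hash
# key" mentioned above — using shard metadata from DescribeStream.
#
#   shard = conn.describe_stream(
#       'example-stream')['StreamDescription']['Shards'][0]
#   lo = int(shard['HashKeyRange']['StartingHashKey'])
#   hi = int(shard['HashKeyRange']['EndingHashKey'])
#   conn.split_shard('example-stream', shard['ShardId'],
#                    str((lo + hi) // 2))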
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response.getheaders())
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
elssar/calibre | src/odf/style.py | 94 | 4676 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import STYLENS
from element import Element
def StyleElement(**args):
e = Element(**args)
if args.get('check_grammar', True) == True:
if not args.has_key('displayname'):
e.setAttrNS(STYLENS,'display-name', args.get('name'))
return e
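# Illustrative note (not part of this module): because StyleElement
# falls back to the style's own name, a call such as
#   Style(name="Bold", family="text")
# would be expected to yield a <style:style> element whose
# style:display-name is "Bold", unless a displayname argument is
# supplied explicitly.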
# Autogenerated
def BackgroundImage(**args):
return Element(qname = (STYLENS,'background-image'), **args)
def ChartProperties(**args):
return Element(qname = (STYLENS,'chart-properties'), **args)
def Column(**args):
return Element(qname = (STYLENS,'column'), **args)
def ColumnSep(**args):
return Element(qname = (STYLENS,'column-sep'), **args)
def Columns(**args):
return Element(qname = (STYLENS,'columns'), **args)
def DefaultStyle(**args):
return Element(qname = (STYLENS,'default-style'), **args)
def DrawingPageProperties(**args):
return Element(qname = (STYLENS,'drawing-page-properties'), **args)
def DropCap(**args):
return Element(qname = (STYLENS,'drop-cap'), **args)
def FontFace(**args):
return Element(qname = (STYLENS,'font-face'), **args)
def Footer(**args):
return Element(qname = (STYLENS,'footer'), **args)
def FooterLeft(**args):
return Element(qname = (STYLENS,'footer-left'), **args)
def FooterStyle(**args):
return Element(qname = (STYLENS,'footer-style'), **args)
def FootnoteSep(**args):
return Element(qname = (STYLENS,'footnote-sep'), **args)
def GraphicProperties(**args):
return Element(qname = (STYLENS,'graphic-properties'), **args)
def HandoutMaster(**args):
return Element(qname = (STYLENS,'handout-master'), **args)
def Header(**args):
return Element(qname = (STYLENS,'header'), **args)
def HeaderFooterProperties(**args):
return Element(qname = (STYLENS,'header-footer-properties'), **args)
def HeaderLeft(**args):
return Element(qname = (STYLENS,'header-left'), **args)
def HeaderStyle(**args):
return Element(qname = (STYLENS,'header-style'), **args)
def ListLevelProperties(**args):
return Element(qname = (STYLENS,'list-level-properties'), **args)
def Map(**args):
return Element(qname = (STYLENS,'map'), **args)
def MasterPage(**args):
return StyleElement(qname = (STYLENS,'master-page'), **args)
def PageLayout(**args):
return Element(qname = (STYLENS,'page-layout'), **args)
def PageLayoutProperties(**args):
return Element(qname = (STYLENS,'page-layout-properties'), **args)
def ParagraphProperties(**args):
return Element(qname = (STYLENS,'paragraph-properties'), **args)
def PresentationPageLayout(**args):
return StyleElement(qname = (STYLENS,'presentation-page-layout'), **args)
def RegionCenter(**args):
return Element(qname = (STYLENS,'region-center'), **args)
def RegionLeft(**args):
return Element(qname = (STYLENS,'region-left'), **args)
def RegionRight(**args):
return Element(qname = (STYLENS,'region-right'), **args)
def RubyProperties(**args):
return Element(qname = (STYLENS,'ruby-properties'), **args)
def SectionProperties(**args):
return Element(qname = (STYLENS,'section-properties'), **args)
def Style(**args):
return StyleElement(qname = (STYLENS,'style'), **args)
def TabStop(**args):
return Element(qname = (STYLENS,'tab-stop'), **args)
def TabStops(**args):
return Element(qname = (STYLENS,'tab-stops'), **args)
def TableCellProperties(**args):
return Element(qname = (STYLENS,'table-cell-properties'), **args)
def TableColumnProperties(**args):
return Element(qname = (STYLENS,'table-column-properties'), **args)
def TableProperties(**args):
return Element(qname = (STYLENS,'table-properties'), **args)
def TableRowProperties(**args):
return Element(qname = (STYLENS,'table-row-properties'), **args)
def TextProperties(**args):
return Element(qname = (STYLENS,'text-properties'), **args)
| gpl-3.0 |
keelhaule/alfanous | src/alfanous-django/wui/templatetags/kwacros.py | 7 | 6077 | #
# templatetags/kwacros.py - Support for macros in Django templates
#
# Based on snippet by
# Author: Michal Ludvig <michal@logix.cz>
# http://www.logix.cz/michal
#
# modified for args and kwargs by Skylar Saveland http://skyl.org
#
"""
Usage example:
0) Save this file as <yourapp>/templatetags/kwacros.py
1) In your template load the library:
{% load kwacros %}
2) Define a new macro called 'test2args1kwarg' that takes two args and a kwarg.
All will be optional.
{% kwacro test2args1kwarg arg1 arg2 baz="Default baz" %}
{% firstof arg1 "default arg1" %}
{% if arg2 %}{{ arg2 }}{% else %}default arg2{% endif %}
{{ baz }}
{% endkwacro %}
3) Use the macro with a string parameters or context variables::
{% usekwacro test2args1kwarg "foo" "bar" baz="KW" %}
<br>
{% usekwacro test2args1kwarg num_pages "bar" %}
<br>
{% usekwacro test2args1kwarg %}
<br>
{% usekwacro test2args1kwarg "new" "foobar"|join:"," baz="diff kwarg" %}
renders like
foo bar KW
77 bar Default baz
default arg1 default arg2 Default baz
new f,o,o,b,a,r diff kwarg
4) Alternatively save your macros in a separate
file, e.g. "mymacros.html" and load it to the
current template with:
{% loadkwacros "mymacros.html" %}
Then use these loaded macros in {% usekwacro %}
as described above.
Bear in mind that defined and loaded kwacros are local
to each template file and are not inherited
through {% extends ... %} tags.
"""
from django import template
from django.template import FilterExpression
from django.template.loader import get_template
register = template.Library()
def _setup_macros_dict(parser):
## Metadata of each macro are stored in a new attribute
## of 'parser' class. That way we can access it later
## in the template when processing 'usemacro' tags.
try:
## Only try to access it to eventually trigger an exception
parser._macros
except AttributeError:
parser._macros = {}
class DefineMacroNode(template.Node):
def __init__(self, name, nodelist, args):
self.name = name
self.nodelist = nodelist
self.args = []
self.kwargs = {}
for a in args:
if "=" not in a:
self.args.append(a)
else:
name, value = a.split("=")
self.kwargs[name] = value
def render(self, context):
## empty string - {% macro %} tag does no output
return ''
@register.tag(name="kwacro")
def do_macro(parser, token):
try:
args = token.split_contents()
tag_name, macro_name, args = args[0], args[1], args[2:]
except IndexError:
m = ("'%s' tag requires at least one argument (macro name)"
% token.contents.split()[0])
raise template.TemplateSyntaxError, m
# TODO: could do some validations here,
# for now, "blow your head clean off"
nodelist = parser.parse(('endkwacro', ))
parser.delete_first_token()
## Metadata of each macro are stored in a new attribute
## of 'parser' class. That way we can access it later
## in the template when processing 'usemacro' tags.
_setup_macros_dict(parser)
parser._macros[macro_name] = DefineMacroNode(macro_name, nodelist, args)
return parser._macros[macro_name]
class LoadMacrosNode(template.Node):
def render(self, context):
## empty string - {% loadmacros %} tag does no output
return ''
@register.tag(name="loadkwacros")
def do_loadmacros(parser, token):
try:
tag_name, filename = token.split_contents()
except IndexError:
m = ("'%s' tag requires at least one argument (macro name)"
% token.contents.split()[0])
raise template.TemplateSyntaxError, m
if filename[0] in ('"', "'") and filename[-1] == filename[0]:
filename = filename[1:-1]
t = get_template(filename)
macros = t.nodelist.get_nodes_by_type(DefineMacroNode)
## Metadata of each macro are stored in a new attribute
## of 'parser' class. That way we can access it later
## in the template when processing 'usemacro' tags.
_setup_macros_dict(parser)
for macro in macros:
parser._macros[macro.name] = macro
return LoadMacrosNode()
class UseMacroNode(template.Node):
def __init__(self, macro, fe_args, fe_kwargs):
self.macro = macro
self.fe_args = fe_args
self.fe_kwargs = fe_kwargs
def render(self, context):
for i, arg in enumerate(self.macro.args):
try:
fe = self.fe_args[i]
context[arg] = fe.resolve(context)
except IndexError:
context[arg] = ""
for name, default in self.macro.kwargs.iteritems():
if name in self.fe_kwargs:
context[name] = self.fe_kwargs[name].resolve(context)
else:
context[name] = FilterExpression(default,
self.macro.parser
).resolve(context)
return self.macro.nodelist.render(context)
@register.tag(name="usekwacro")
def do_usemacro(parser, token):
try:
args = token.split_contents()
tag_name, macro_name, values = args[0], args[1], args[2:]
except IndexError:
m = ("'%s' tag requires at least one argument (macro name)"
% token.contents.split()[0])
raise template.TemplateSyntaxError, m
try:
macro = parser._macros[macro_name]
except (AttributeError, KeyError):
m = "Macro '%s' is not defined" % macro_name
raise template.TemplateSyntaxError, m
fe_kwargs = {}
fe_args = []
for val in values:
if "=" in val:
# kwarg
name, value = val.split("=")
fe_kwargs[name] = FilterExpression(value, parser)
else: # arg
# no validation, go for it ...
fe_args.append(FilterExpression(val, parser))
macro.parser = parser
return UseMacroNode(macro, fe_args, fe_kwargs)
| agpl-3.0 |
iulian787/spack | var/spack/repos/builtin/packages/sloccount/package.py | 2 | 1122 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sloccount(MakefilePackage):
"""SLOCCount is a set of tools for counting physical Source Lines of Code
(SLOC) in a large number of languages of a potentially large set of
programs."""
homepage = "https://dwheeler.com/sloccount/"
url = "https://dwheeler.com/sloccount/sloccount-2.26.tar.gz"
version('2.26', sha256='fa7fa2bbf2f627dd2d0fdb958bd8ec4527231254c120a8b4322405d8a4e3d12b')
# md5sum needed at run-time
depends_on('coreutils', type=('build', 'run'))
def edit(self, spec, prefix):
makefile = FileFilter('makefile')
makefile.filter('^PREFIX=.*', 'PREFIX=' + prefix)
makefile.filter('^CC=.*', 'CC=' + spack_cc)
# Needed for `make test` to pass
makefile.filter('PATH=.:${PATH}', 'PATH=$(CURDIR):${PATH}',
string=True)
def install(self, spec, prefix):
mkdir(prefix.bin)
make('install')
| lgpl-2.1 |
fedebell/Laboratorio3 | relazione15/boltzPlot.py | 1 | 1911 | import uncertainties
from uncertainties import ufloat, unumpy
import math
import numpy
import pylab
from scipy.optimize import curve_fit
import scipy.stats
def quadratic(x, V0, Rt):
return V0*(1+(x/Rt))**0.5
INPUT = "/home/federico/Laboratorio3/relazione15/boltmannEstesi.txt"
R, dR, V, dV= pylab.loadtxt(INPUT, unpack=True)
pylab.rc('font',size=13)
pylab.title('$ V_{rms} \, \, vs \, \,R$', fontsize = "16")
pylab.ylabel('$V_{rms}\,(mV)$', size = "14")
pylab.xlabel('$R\,(k\Omega)$', size = "14")
pylab.grid(color = "gray")
#Check the argument order (errorbar takes x, y, yerr, xerr)
pylab.errorbar(R, V, dV, dR, ".", color="black")
#V = unumpy.uarray(V, dV)
#R = unumpy.uarray(R, dR)
#Note: errors must always be summed in the same units of measurement,
#so the factor converting between units also ends up in the coefficient
#TODO: Make it better
error = dV
init = numpy.array([80, 40])
#All errors are statistical
par, cov = curve_fit(quadratic, R, V, init, error, absolute_sigma=True)
#statistical treatment of the errors
print(par, cov)
#Again, I do not understand the chi-squared: nothing changed switching from "true" to "false" (both strings are truthy; absolute_sigma expects a boolean)
V0 = par[0]
Rt = par[1]
dV0 = (cov[0][0])**0.5
dRt = (cov[1][1])**0.5
print("V0 =", V0, "+/-", dV0)
print("Rt =", Rt, "+/-", dRt)
chisq = ((V-quadratic(R, V0, Rt))/error)**2
somma = sum(chisq)
ndof = len(unumpy.nominal_values(V)) - 2 #subtract the two parameters extracted from the fit
p=1.0-scipy.stats.chi2.cdf(somma, ndof)
print("Chisquare/ndof = %f/%d" % (somma, ndof))
print("p = ", p)
div = 1000
bucket = numpy.array([0.0 for i in range(div)])
retta = numpy.array([0.0 for i in range(div)])
inc = (R.max()-R.min())/div
for i in range(len(bucket)):
bucket[i]=float(i)*inc + R.min()
retta[i] = quadratic(bucket[i], par[0], par[1])
pylab.plot(bucket, retta, color = "red")
pylab.show()
| gpl-3.0 |
lgarren/spack | var/spack/repos/builtin/packages/font-misc-ethiopic/package.py | 3 | 2109 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class FontMiscEthiopic(Package):
"""X.org misc-ethiopic font."""
homepage = "http://cgit.freedesktop.org/xorg/font/misc-ethiopic"
url = "https://www.x.org/archive/individual/font/font-misc-ethiopic-1.0.3.tar.gz"
version('1.0.3', '02ddea9338d9d36804ad38f3daadb55a')
depends_on('font-util')
depends_on('fontconfig', type='build')
depends_on('mkfontdir', type='build')
depends_on('mkfontscale', type='build')
depends_on('pkg-config@0.9.0:', type='build')
depends_on('util-macros', type='build')
def install(self, spec, prefix):
configure('--prefix={0}'.format(prefix))
make('install')
# `make install` copies the files to the font-util installation.
# Create a fake directory to convince Spack that we actually
# installed something.
mkdir(prefix.lib)
| lgpl-2.1 |
browseinfo/7.0-server | openerp/tools/which.py | 456 | 6884 | #!/usr/bin/env python
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts both string or iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
windows = sys.platform.startswith('win')
defpath = environ.get('PATH', defpath).split(pathsep)
if windows:
defpath.insert(0, '.') # can insert without checking, when duplicates are removed
# given the quite usual mess in PATH on Windows, let's rather remove duplicates
seen = set()
defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
del seen
defpathext = [''] + environ.get('PATHEXT',
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function yields full paths (not necessarily absolute paths),
in which the given file name matches an existing file in a directory on the path.
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')
>>> if windows: test_which([cmd], 'cmd.exe')
>>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
>>> if windows: test_which([cmd], 'cmd', pathext='.exe')
>>> if windows: test_which([cmd], cmd)
>>> if windows: test_which([cmd], cmd, path='<nonexistent>')
>>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
>>> if windows: test_which([cmd], cmd[:-4])
>>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')
>>> if windows: test_which([], 'cmd', path='<nonexistent>')
>>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
>>> if windows: test_which([], '<nonexistent>/cmd')
>>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')
>>> if not windows: sh = '/bin/sh'
>>> if not windows: test_which([sh], 'sh')
>>> if not windows: test_which([sh], 'sh', path=dirname(sh))
>>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
>>> if not windows: test_which([sh], sh)
>>> if not windows: test_which([sh], sh, path='<nonexistent>')
>>> if not windows: test_which([sh], sh, pathext='<nonexistent>')
>>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
>>> if not windows: test_which([], 'sh', path='<nonexistent>')
>>> if not windows: test_which([], '<nonexistent>/sh')
"""
filepath, file = split(file)
if filepath:
path = (filepath,)
elif path is None:
path = defpath
elif isinstance(path, str):
path = path.split(pathsep)
if pathext is None:
pathext = defpathext
elif isinstance(pathext, str):
pathext = pathext.split(pathsep)
if not '' in pathext:
pathext.insert(0, '') # always check command without extension, even for custom pathext
for dir in path:
basepath = join(dir, file)
for ext in pathext:
fullpath = basepath + ext
if exists(fullpath) and access(fullpath, mode):
yield fullpath
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function returns full path (not necessarily absolute path),
in which the given file name matches an existing file in a directory on the path,
or raises IOError(errno.ENOENT).
>>> # for doctest see which_files()
"""
try:
return iter(which_files(file, mode, path, pathext)).next()
except StopIteration:
try:
from errno import ENOENT
except ImportError:
ENOENT = 2
raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
if __name__ == '__main__':
import doctest
doctest.testmod()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CARocha/sitioreddes | multimedia/migrations/0002_auto__add_multimedia.py | 1 | 4917 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Multimedia'
db.create_table(u'multimedia_multimedia', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('titulo', self.gf('django.db.models.fields.CharField')(max_length=250)),
))
db.send_create_signal(u'multimedia', ['Multimedia'])
def backwards(self, orm):
# Deleting model 'Multimedia'
db.delete_table(u'multimedia_multimedia')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'multimedia.adjuntos': {
'Meta': {'object_name': 'Adjuntos'},
'archivo': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'multimedia.audio': {
'Meta': {'object_name': 'Audio'},
'audio': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'multimedia.fotos': {
'Meta': {'object_name': 'Fotos'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'multimedia.multimedia': {
'Meta': {'object_name': 'Multimedia'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'multimedia.videos': {
'Meta': {'object_name': 'Videos'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['multimedia'] | mit |
jhayworth/config | .emacs.d/elpy/rpc-venv/lib/python2.7/encodings/cp720.py | 417 | 13694 | """Python Character Mapping Codec cp720 generated on Windows:
Vista 6.0.6002 SP2 Multiprocessor Free with the command:
python Tools/unicode/genwincodec.py 720
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp720',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\x80'
u'\x81'
u'\xe9' # 0x82 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x83 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\x84'
u'\xe0' # 0x85 -> LATIN SMALL LETTER A WITH GRAVE
u'\x86'
u'\xe7' # 0x87 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x88 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x89 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x8A -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x8B -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x8C -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\x8d'
u'\x8e'
u'\x8f'
u'\x90'
u'\u0651' # 0x91 -> ARABIC SHADDA
u'\u0652' # 0x92 -> ARABIC SUKUN
u'\xf4' # 0x93 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xa4' # 0x94 -> CURRENCY SIGN
u'\u0640' # 0x95 -> ARABIC TATWEEL
u'\xfb' # 0x96 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x97 -> LATIN SMALL LETTER U WITH GRAVE
u'\u0621' # 0x98 -> ARABIC LETTER HAMZA
u'\u0622' # 0x99 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0x9A -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0x9B -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\xa3' # 0x9C -> POUND SIGN
u'\u0625' # 0x9D -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0x9E -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0x9F -> ARABIC LETTER ALEF
u'\u0628' # 0xA0 -> ARABIC LETTER BEH
u'\u0629' # 0xA1 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0xA2 -> ARABIC LETTER TEH
u'\u062b' # 0xA3 -> ARABIC LETTER THEH
u'\u062c' # 0xA4 -> ARABIC LETTER JEEM
u'\u062d' # 0xA5 -> ARABIC LETTER HAH
u'\u062e' # 0xA6 -> ARABIC LETTER KHAH
u'\u062f' # 0xA7 -> ARABIC LETTER DAL
u'\u0630' # 0xA8 -> ARABIC LETTER THAL
u'\u0631' # 0xA9 -> ARABIC LETTER REH
u'\u0632' # 0xAA -> ARABIC LETTER ZAIN
u'\u0633' # 0xAB -> ARABIC LETTER SEEN
u'\u0634' # 0xAC -> ARABIC LETTER SHEEN
u'\u0635' # 0xAD -> ARABIC LETTER SAD
u'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0xB0 -> LIGHT SHADE
u'\u2592' # 0xB1 -> MEDIUM SHADE
u'\u2593' # 0xB2 -> DARK SHADE
u'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0xB5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0xB6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0xB8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0xBD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0xBE -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0xC6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xC7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0xCF -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xD0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0xD1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0xD2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0xD3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0xD4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0xD5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0xD6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0xD7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0xD8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0xDB -> FULL BLOCK
u'\u2584' # 0xDC -> LOWER HALF BLOCK
u'\u258c' # 0xDD -> LEFT HALF BLOCK
u'\u2590' # 0xDE -> RIGHT HALF BLOCK
u'\u2580' # 0xDF -> UPPER HALF BLOCK
u'\u0636' # 0xE0 -> ARABIC LETTER DAD
u'\u0637' # 0xE1 -> ARABIC LETTER TAH
u'\u0638' # 0xE2 -> ARABIC LETTER ZAH
u'\u0639' # 0xE3 -> ARABIC LETTER AIN
u'\u063a' # 0xE4 -> ARABIC LETTER GHAIN
u'\u0641' # 0xE5 -> ARABIC LETTER FEH
u'\xb5' # 0xE6 -> MICRO SIGN
u'\u0642' # 0xE7 -> ARABIC LETTER QAF
u'\u0643' # 0xE8 -> ARABIC LETTER KAF
u'\u0644' # 0xE9 -> ARABIC LETTER LAM
u'\u0645' # 0xEA -> ARABIC LETTER MEEM
u'\u0646' # 0xEB -> ARABIC LETTER NOON
u'\u0647' # 0xEC -> ARABIC LETTER HEH
u'\u0648' # 0xED -> ARABIC LETTER WAW
u'\u0649' # 0xEE -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0xEF -> ARABIC LETTER YEH
u'\u2261' # 0xF0 -> IDENTICAL TO
u'\u064b' # 0xF1 -> ARABIC FATHATAN
u'\u064c' # 0xF2 -> ARABIC DAMMATAN
u'\u064d' # 0xF3 -> ARABIC KASRATAN
u'\u064e' # 0xF4 -> ARABIC FATHA
u'\u064f' # 0xF5 -> ARABIC DAMMA
u'\u0650' # 0xF6 -> ARABIC KASRA
u'\u2248' # 0xF7 -> ALMOST EQUAL TO
u'\xb0' # 0xF8 -> DEGREE SIGN
u'\u2219' # 0xF9 -> BULLET OPERATOR
u'\xb7' # 0xFA -> MIDDLE DOT
u'\u221a' # 0xFB -> SQUARE ROOT
u'\u207f' # 0xFC -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0xFD -> SUPERSCRIPT TWO
u'\u25a0' # 0xFE -> BLACK SQUARE
u'\xa0' # 0xFF -> NO-BREAK SPACE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
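# Illustrative round-trip (a minimal sketch, not part of the generated codec):
# per the table above, 0x98 maps to ARABIC LETTER HAMZA (U+0621) and 0xA0 to
# ARABIC LETTER BEH (U+0628), and charmap_encode inverts the mapping.
if __name__ == '__main__':
    text, consumed = codecs.charmap_decode(b'\x98\xa0', 'strict', decoding_table)
    assert text == u'\u0621\u0628' and consumed == 2
    raw, written = codecs.charmap_encode(text, 'strict', encoding_table)
    assert raw == b'\x98\xa0' and written == 2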
| gpl-3.0 |
magenta-aps/mox | oio_rest/oio_rest/db/db_helpers.py | 1 | 12383 | # Copyright (C) 2015-2019 Magenta ApS, https://magenta.dk.
# Contact: info@magenta.dk.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""""Encapsulate details about the database structure."""
from collections import namedtuple
from urllib.parse import urlparse
from flask import request
from psycopg2._range import DateTimeTZRange
from psycopg2.extensions import adapt as psyco_adapt, ISQLQuote
from psycopg2.extensions import register_adapter as psyco_register_adapter
from .. import settings
from ..contentstore import content_store
from ..custom_exceptions import BadRequestException
_attribute_fields = {}
def get_attribute_fields(attribute_name):
"""Return the field names from the PostgreSQL type in question.
"""
if not _attribute_fields:
# Initialize attr fields for ease of use.
for c, fs in settings.REAL_DB_STRUCTURE.items():
for a, v in fs["attributter"].items():
_attribute_fields[c + a] = v + ['virkning']
return _attribute_fields[attribute_name.lower()]
def get_field_type(attribute_name, field_name):
for c, fs in settings.REAL_DB_STRUCTURE.items():
if "attributter_metadata" in fs:
for a, fs in fs["attributter_metadata"].items():
if attribute_name == c + a:
if field_name in fs and 'type' in fs[field_name]:
return fs[field_name]['type']
return "text"
_attribute_names = {}
def get_relation_field_type(class_name, field_name):
class_info = settings.REAL_DB_STRUCTURE[class_name.lower()]
if 'relationer_metadata' in class_info:
metadata = class_info['relationer_metadata']
for relation in metadata:
for key in metadata[relation]:
if field_name == key and 'type' in metadata[relation][key]:
return metadata[relation][key]['type']
return "text"
def get_attribute_names(class_name):
"Return the list of all recognized attributes for this class."
if not _attribute_names:
for c, fs in settings.REAL_DB_STRUCTURE.items():
# unfortunately, the ordering of attribute names is of
# semantic importance to the database code, and the
# ordering isn't consistent in Python 3.5
#
# specifically, the two state types of 'aktivitet' can
# trigger occasional errors
_attribute_names[c] = sorted(c + a for a in fs['attributter'])
return _attribute_names[class_name.lower()]
def get_state_names(class_name):
"Return the list of all recognized states for this class."
states = settings.REAL_DB_STRUCTURE[class_name.lower()]['tilstande']
if isinstance(states, list):
return [state[0] for state in states]
else:
return list(states)
_relation_names = {}
def get_relation_names(class_name):
"Return the list of all recognized relations for this class."
if len(_relation_names) == 0:
for c, fs in settings.REAL_DB_STRUCTURE.items():
_relation_names[c] = (
fs['relationer_nul_til_en'] +
fs['relationer_nul_til_mange']
)
return _relation_names[class_name.lower()]
def get_document_part_relation_names():
"""Return the list of all recognized relations for DokumentDel"""
return ["underredigeringaf"]
# Helper classes for adapting special types
Soegeord = namedtuple('KlasseSoegeordType', 'identifier description category')
OffentlighedUndtaget = namedtuple(
'OffentlighedUndtagetType', 'alternativtitel hjemmel'
)
JournalNotat = namedtuple('JournalNotatType', 'titel notat format')
JournalDokument = namedtuple(
'JournalPostDokumentAttrType', 'dokumenttitel offentlighedundtaget'
)
AktoerAttr = namedtuple(
'AktivitetAktoerAttr',
'accepteret obligatorisk repraesentation_uuid repraesentation_urn'
)
VaerdiRelationAttr = namedtuple(
'TilstandVaerdiRelationAttrType',
'forventet nominelvaerdi'
)
def input_list(_type, input, key):
"""Take a value with key from the input and return a list.
_type.input is called for each value in the list. If the key is not
found in the input, then None is returned."""
values = input.get(key, None)
if values is None:
return None
else:
return [_type.input(v) for v in values]
def input_dict_list(_type, input):
"""Take a dict input and return a generator.
Input is assumed to be a dict with list values.
_type.input is called for each value in the list corresponding to each
key. If the input is None, then None is returned."""
if input is None:
return None
else:
return [_type.input(k, v) for k in input for v in input[k]]
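# A minimal sketch of the two helpers above (hypothetical Tag type, not part
# of this module):
#
#     class Tag(namedtuple('Tag', 'name')):
#         @classmethod
#         def input(cls, i):
#             return cls(i.get('name'))
#
#     input_list(Tag, {'tags': [{'name': 'a'}, {'name': 'b'}]}, 'tags')
#     # -> [Tag(name='a'), Tag(name='b')]
#
# input_dict_list instead expects a two-argument input(key, value), as used
# by DokumentDelRelationType below.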
def to_bool(s):
"""Convert string to boolean. Passes through bool and None values."""
if isinstance(s, bool):
return s
elif s is None:
return None
else:
if s in ("True", "true", "1"):
return True
elif s in ("False", "false", "0"):
return False
raise ValueError("%s is not a valid boolean value" % s)
class Searchable(object):
"""Mixin class for searchable namedtuples."""
non_searchable_fields = ('virkning',)
@classmethod
def get_fields(cls):
"""Return tuple of searchable fields."""
if 'virkning' in cls._fields:
return tuple(set(cls._fields) - set(cls.non_searchable_fields))
else:
return cls._fields
class DokumentVariantType(namedtuple('DokumentVariantType',
'varianttekst egenskaber dele')):
@classmethod
def input(cls, i):
if i is None:
return None
return cls(
i.get("varianttekst", None),
input_list(DokumentVariantEgenskaberType, i, "egenskaber"),
input_list(DokumentDelType, i, "dele")
)
class DokumentVariantEgenskaberType(Searchable, namedtuple(
'DokumentVariantEgenskaberType',
'arkivering delvisscannet offentliggoerelse produktion virkning'
)):
@classmethod
def input(cls, i):
if i is None:
return None
return cls(
to_bool(i.get("arkivering", None)),
to_bool(i.get("delvisscannet", None)),
to_bool(i.get("offentliggoerelse", None)),
to_bool(i.get("produktion", None)),
Virkning.input(i.get("virkning", None))
)
class DokumentDelType(namedtuple(
'DokumentDelType',
'deltekst egenskaber relationer'
)):
@classmethod
def input(cls, i):
if i is None:
return None
return cls(
i.get('deltekst', None),
input_list(DokumentDelEgenskaberType, i, "egenskaber"),
input_dict_list(DokumentDelRelationType, i.get("relationer", None))
)
class Virkning(namedtuple('Virkning',
'timeperiod aktoerref aktoertypekode notetekst')):
@classmethod
def input(cls, i):
if i is None:
return None
return cls(
DateTimeTZRange(
i.get("from", None),
i.get("to", None)
),
i.get("aktoerref", None),
i.get("aktoertypekode", None),
i.get("notetekst", None)
)
class DokumentDelEgenskaberType(Searchable, namedtuple(
'DokumentDelEgenskaberType',
'indeks indhold lokation mimetype virkning'
)):
@classmethod
def _get_file_storage_for_content_url(cls, url):
"""
Return a FileStorage object for the form field specified by the URL.
The URL uses the scheme 'field', and its path points to a form field
which contains the uploaded file. For example, for a URL of 'field:f1',
this method would return the FileStorage object for the file
contained in form field 'f1'.
"""
o = urlparse(url)
if o.scheme == 'field':
field_name = o.path
file_obj = request.files.get(field_name, None)
if file_obj is None:
raise BadRequestException(
('The content URL "%s" referenced the field "%s", but it '
'was not present in the request.') % (url, o.path)
)
return file_obj
@classmethod
def input(cls, i):
if i is None:
return None
indhold = i.get('indhold', None)
# If the content URL is provided, and we are not doing a read
# operation, save the uploaded file
if indhold is not None and indhold != "" and request.method != 'GET':
try:
o = urlparse(indhold)
except ValueError:
raise BadRequestException(
"The parameter \"indhold\" contained "
"an invalid URL: \"%s\"" % indhold)
# If the user is uploading a file, then handle the upload
if o.scheme == 'field':
# Get FileStorage object referenced by indhold field
f = cls._get_file_storage_for_content_url(indhold)
# Save the file and get the URL for the saved file
indhold = content_store.save_file_object(f)
else:
# Otherwise, just accept whatever URL they pass.
pass
return cls(
i.get('indeks', None),
indhold,
i.get('lokation', None),
i.get('mimetype', None),
Virkning.input(i.get('virkning', None))
)
class DokumentDelRelationType(namedtuple(
'DokumentDelRelationType',
'reltype virkning relmaaluuid relmaalurn objekttype'
)):
@classmethod
def input(cls, key, i):
if i is None:
return None
return cls(
key,
Virkning.input(i.get('virkning', None)),
i.get('uuid', None),
i.get('urn', None),
i.get('objekttype', None),
)
class NamedTupleAdapter(object):
"""Adapt namedtuples, while performing a cast to the tuple's classname."""
def __init__(self, tuple_obj):
self._tuple_obj = tuple_obj
def __conform__(self, proto):
if proto is ISQLQuote:
return self
def prepare(self, conn):
self._conn = conn
def prepare_and_adapt(self, x):
x = psyco_adapt(x)
if hasattr(x, 'prepare'):
x.prepare(self._conn)
return x
def getquoted(self):
values = list(map(self.prepare_and_adapt, self._tuple_obj))
values = [v.getquoted() for v in values]
sql = (b'ROW(' + b','.join(values) + b') :: ' +
self._tuple_obj.__class__.__name__.encode('ascii'))
return sql
def __str__(self):
return self.getquoted()
class AktoerAttrAdapter(NamedTupleAdapter):
def getquoted(self):
values = list(map(self.prepare_and_adapt, self._tuple_obj))
values = [v.getquoted() for v in values]
qaa = AktoerAttr(*values) # quoted_aktoer_attr
values = [
qaa.obligatorisk + b'::AktivitetAktoerAttrObligatoriskKode',
qaa.accepteret + b'::AktivitetAktoerAttrAccepteretKode',
qaa.repraesentation_uuid + b'::uuid',
qaa.repraesentation_urn
]
sql = (b'ROW(' + b','.join(values) + b') :: ' +
self._tuple_obj.__class__.__name__.encode('ascii'))
return sql
psyco_register_adapter(Virkning, NamedTupleAdapter)
psyco_register_adapter(Soegeord, NamedTupleAdapter)
psyco_register_adapter(OffentlighedUndtaget, NamedTupleAdapter)
psyco_register_adapter(JournalNotat, NamedTupleAdapter)
psyco_register_adapter(JournalDokument, NamedTupleAdapter)
psyco_register_adapter(VaerdiRelationAttr, NamedTupleAdapter)
psyco_register_adapter(AktoerAttr, AktoerAttrAdapter)
# Dokument variants
psyco_register_adapter(DokumentVariantType, NamedTupleAdapter)
psyco_register_adapter(DokumentVariantEgenskaberType, NamedTupleAdapter)
# Dokument parts
psyco_register_adapter(DokumentDelType, NamedTupleAdapter)
psyco_register_adapter(DokumentDelEgenskaberType, NamedTupleAdapter)
psyco_register_adapter(DokumentDelRelationType, NamedTupleAdapter)
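# Illustrative effect of the registrations above (a sketch, not executed
# here): psycopg2 now renders a namedtuple parameter as a composite-type
# literal, so passing Soegeord('a', 'b', 'c') to cursor.execute() emits
# roughly  ROW('a','b','c') :: KlasseSoegeordType  and PostgreSQL casts it
# to the matching row type; AktoerAttr additionally casts its code fields
# via AktoerAttrAdapter.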
| mpl-2.0 |
omg-insa/server | django/contrib/gis/gdal/prototypes/errcheck.py | 404 | 4207 | """
This module houses the error-checking routines used by the GDAL
ctypes prototypes.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.gdal.error import check_err, OGRException, SRSException
from django.contrib.gis.gdal.libgdal import lgdal
# Helper routines for retrieving pointers and/or values from
# arguments passed in by reference.
def arg_byref(args, offset=-1):
"Returns the pointer argument's by-refernece value."
return args[offset]._obj.value
def ptr_byref(args, offset=-1):
"Returns the pointer argument passed in by-reference."
return args[offset]._obj
def check_bool(result, func, cargs):
"Returns the boolean evaluation of the value."
return bool(result)
### String checking Routines ###
def check_const_string(result, func, cargs, offset=None):
"""
Similar functionality to `check_string`, but does not free the pointer.
"""
if offset:
check_err(result)
ptr = ptr_byref(cargs, offset)
return ptr.value
else:
return result
def check_string(result, func, cargs, offset=-1, str_result=False):
"""
Checks the string output returned from the given function, and frees
the string pointer allocated by OGR. The `str_result` keyword
may be used when the result is the string pointer, otherwise
the OGR error code is assumed. The `offset` keyword may be used
to extract the string pointer passed in by-reference at the given
slice offset in the function arguments.
"""
if str_result:
# For routines that return a string.
ptr = result
if not ptr: s = None
else: s = string_at(result)
else:
# Error-code return specified.
check_err(result)
ptr = ptr_byref(cargs, offset)
# Getting the string value
s = ptr.value
# Correctly freeing the allocated memory behind the GDAL pointer
# with the VSIFree routine.
if ptr: lgdal.VSIFree(ptr)
return s
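# Wiring sketch (hypothetical, not part of this module): ctypes invokes
# errcheck as errcheck(result, func, args), so the extra keywords here are
# usually bound up front, e.g.:
#
#     from functools import partial
#     lgdal.OGR_G_ExportToWkt.errcheck = partial(check_string, offset=-1)
#
# after which each call returns a Python copy of the OGR-allocated string
# and frees the original buffer.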
### DataSource, Layer error-checking ###
### Envelope checking ###
def check_envelope(result, func, cargs, offset=-1):
"Checks a function that returns an OGR Envelope by reference."
env = ptr_byref(cargs, offset)
return env
### Geometry error-checking routines ###
def check_geom(result, func, cargs):
"Checks a function that returns a geometry."
# OGR_G_Clone may return an integer, even though the
# restype is set to c_void_p
if isinstance(result, (int, long)):
result = c_void_p(result)
if not result:
raise OGRException('Invalid geometry pointer returned from "%s".' % func.__name__)
return result
def check_geom_offset(result, func, cargs, offset=-1):
"Chcks the geometry at the given offset in the C parameter list."
check_err(result)
geom = ptr_byref(cargs, offset=offset)
return check_geom(geom, func, cargs)
### Spatial Reference error-checking routines ###
def check_srs(result, func, cargs):
if isinstance(result, (int, long)):
result = c_void_p(result)
if not result:
raise SRSException('Invalid spatial reference pointer returned from "%s".' % func.__name__)
return result
### Other error-checking routines ###
def check_arg_errcode(result, func, cargs):
"""
The error code is returned in the last argument, by reference.
Check its value with `check_err` before returning the result.
"""
check_err(arg_byref(cargs))
return result
def check_errcode(result, func, cargs):
"""
Check the error code returned (c_int).
"""
check_err(result)
return
def check_pointer(result, func, cargs):
"Makes sure the result pointer is valid."
if isinstance(result, (int, long)):
result = c_void_p(result)
if bool(result):
return result
else:
raise OGRException('Invalid pointer returned from "%s"' % func.__name__)
def check_str_arg(result, func, cargs):
"""
This is for the OSRGet[Angular|Linear]Units functions, which
require that the returned string pointer not be freed. This
returns both the double and tring values.
"""
dbl = result
ptr = cargs[-1]._obj
return dbl, ptr.value
| bsd-3-clause |
seem-sky/kbengine | kbe/res/scripts/common/Lib/encodings/cp1257.py | 272 | 13374 | """ Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1257',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\ufffe' # 0x83 -> UNDEFINED
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\xa8' # 0x8D -> DIAERESIS
'\u02c7' # 0x8E -> CARON
'\xb8' # 0x8F -> CEDILLA
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\xaf' # 0x9D -> MACRON
'\u02db' # 0x9E -> OGONEK
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\ufffe' # 0xA1 -> UNDEFINED
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\ufffe' # 0xA5 -> UNDEFINED
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xe6' # 0xBF -> LATIN SMALL LETTER AE
'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
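# Illustrative use of the incremental API above (a minimal sketch, not part
# of the generated module): per the table, 0xD0 decodes to LATIN CAPITAL
# LETTER S WITH CARON (U+0160).
if __name__ == '__main__':
    dec = IncrementalDecoder()
    assert dec.decode(b'\xd0') == '\u0160'
    assert codecs.charmap_encode('\u0160', 'strict', encoding_table)[0] == b'\xd0'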
| lgpl-3.0 |
Sup3Roque/Pancas | plugin.video.loganaddon/resources/lib/libraries/cloudflare.py | 21 | 4174 |
#
# Copyright (C) 2015 tknorris (Derived from Mikey1234's & Lambda's)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
# This code is a derivative of the YouTube plugin for XBMC and associated works
# released under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3
import re,urllib,urlparse,time
from resources.lib.libraries import cache
from resources.lib.libraries import client
from resources.lib.libraries import control
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
try:
control.log('[cloudflare] request %s' % url)
if headers is None: headers = {}
agent = cache.get(cloudflareAgent, 168)
if 'User-Agent' not in headers: headers['User-Agent'] = agent
u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)
result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)
if result[0] == '503':
agent = cache.get(cloudflareAgent, 0) ; headers['User-Agent'] = agent
cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)
result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
else:
result = result[1]
#control.log('[cloudflare] result %s' % result)
return result
except:
return
def source(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
return request(url, post, headers, mobile, safe, timeout)
def cloudflareAgent():
return client.randomagent()
def cloudflareCookie(url, post, headers, mobile, safe, timeout):
try:
result = client.request(url, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, error=True)
jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[0]
builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
decryptVal = parseJSString(init)
lines = builder.split(';')
for line in lines:
if len(line)>0 and '=' in line:
sections=line.split('=')
line_val = parseJSString(sections[1])
decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))
answer = decryptVal + len(urlparse.urlparse(url).netloc)
query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (url, jschl, answer)
if 'type="hidden" name="pass"' in result:
passval = re.compile('name="pass" value="(.*?)"').findall(result)[0]
query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (url, urllib.quote_plus(passval), jschl, answer)
time.sleep(5)
cookie = client.request(query, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='cookie', error=True)
return cookie
except:
pass
def parseJSString(s):
try:
offset=1 if s[0]=='+' else 0
val = int(eval(s.replace('!+[]','1').replace('!![]','1').replace('[]','0').replace('(','str(')[offset:]))
return val
except:
pass
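# Worked example (illustrative only): for the challenge fragment
# s = '+((!+[]+!![]+[])+(!+[]+!![]))' the replacements produce
# '+str(str(1+1+0)+str(1+1))'; offset=1 drops the leading '+', and
# eval gives str('2' + '2') == '22', so parseJSString returns 22 --
# the same encoding Cloudflare's challenge JavaScript uses to hide
# small integers.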
| gpl-2.0 |
malie/theano-rbm-on-word-tuples | read.py | 1 | 1782 | import re
from sklearn.feature_extraction.text import CountVectorizer
def find_common_words(all_words, num_most_frequent_words):
vectorizer = CountVectorizer(
stop_words=None, # 'english',
max_features=num_most_frequent_words,
binary=True)
vectorizer.fit(all_words)
return (vectorizer.vocabulary_, vectorizer.get_feature_names())
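# Rough sketch of the return shape (illustrative, not executed here): for
# all_words = ['the', 'sea', 'the'] and num_most_frequent_words = 2 the
# fitted vectorizer yields ({'sea': 0, 'the': 1}, ['sea', 'the']) --
# vocabulary_ maps each kept word to its (alphabetical) column index.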
def read_odyssey_tuples(tuplesize,
num_most_frequent_words,
verbose=False):
with open('pg1727-part.txt', 'r') as file:
text = re.findall(r'[a-zA-Z]+', file.read())
(common_voc, common_names) = find_common_words(text, num_most_frequent_words)
print(common_voc)
print(common_names)
res = []
dist = 12
for i in range(len(text)-dist):
first_word = text[i]
if first_word in common_voc:
a = common_voc[first_word]
tuple = [a]
for j in range(dist):
next_word = text[i+1+j]
if next_word in common_voc:
n = common_voc[next_word]
tuple.append(n)
if len(tuple) == tuplesize:
res.append(tuple)
if verbose and i < 200:
print(tuple)
print('from ', text[i:i+2+j])
break
return (res, common_names)
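# Illustrative trace (hypothetical vocabulary, not the real Odyssey output):
# with tuplesize=3, dist=12 and common_voc = {'sea': 0, 'ship': 1, 'the': 2},
# the span "the dark sea carried the ship" contributes [2, 0, 2] (starting
# at the first 'the') and [0, 2, 1] (starting at 'sea'); uncommon words are
# skipped and each tuple stops at tuplesize indices.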
if __name__ == "__main__":
num_words = 20
(tuples, words) = read_odyssey_tuples(3, num_words, verbose=True)
print('number of common word tuples: ', len(tuples))
for s in range(10):
for i in tuples[s]:
print(i, words[i])
print('')
ts = set([(a,b,c) for a,b,c in tuples])
print('distinct word tuples: ', len(ts))
| bsd-3-clause |
madslonnberg/blog | node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/functional.py | 197 | 113704 | # -*- coding: utf-8 -*-
"""
pygments.lexers.functional
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for functional languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, \
String, Number, Punctuation, Literal, Generic, Error
__all__ = ['RacketLexer', 'SchemeLexer', 'CommonLispLexer', 'HaskellLexer',
'AgdaLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer',
'SMLLexer', 'OcamlLexer', 'ErlangLexer', 'ErlangShellLexer',
'OpaLexer', 'CoqLexer', 'NewLispLexer', 'ElixirLexer',
'ElixirConsoleLexer', 'KokaLexer']
line_re = re.compile('.*?\n')
class RacketLexer(RegexLexer):
"""
Lexer for `Racket <http://racket-lang.org/>`_ source code (formerly known as
PLT Scheme).
*New in Pygments 1.6.*
"""
name = 'Racket'
aliases = ['racket', 'rkt']
filenames = ['*.rkt', '*.rktl']
mimetypes = ['text/x-racket', 'application/x-racket']
# From namespace-mapped-symbols
keywords = [
'#%app', '#%datum', '#%expression', '#%module-begin',
'#%plain-app', '#%plain-lambda', '#%plain-module-begin',
'#%provide', '#%require', '#%stratified-body', '#%top',
'#%top-interaction', '#%variable-reference', '...', 'and', 'begin',
'begin-for-syntax', 'begin0', 'case', 'case-lambda', 'cond',
'datum->syntax-object', 'define', 'define-for-syntax',
'define-struct', 'define-syntax', 'define-syntax-rule',
'define-syntaxes', 'define-values', 'define-values-for-syntax',
'delay', 'do', 'expand-path', 'fluid-let', 'hash-table-copy',
'hash-table-count', 'hash-table-for-each', 'hash-table-get',
'hash-table-iterate-first', 'hash-table-iterate-key',
'hash-table-iterate-next', 'hash-table-iterate-value',
'hash-table-map', 'hash-table-put!', 'hash-table-remove!',
'hash-table?', 'if', 'lambda', 'let', 'let*', 'let*-values',
'let-struct', 'let-syntax', 'let-syntaxes', 'let-values', 'let/cc',
'let/ec', 'letrec', 'letrec-syntax', 'letrec-syntaxes',
'letrec-syntaxes+values', 'letrec-values', 'list-immutable',
'make-hash-table', 'make-immutable-hash-table', 'make-namespace',
'module', 'module-identifier=?', 'module-label-identifier=?',
'module-template-identifier=?', 'module-transformer-identifier=?',
'namespace-transformer-require', 'or', 'parameterize',
'parameterize*', 'parameterize-break', 'provide',
'provide-for-label', 'provide-for-syntax', 'quasiquote',
'quasisyntax', 'quasisyntax/loc', 'quote', 'quote-syntax',
'quote-syntax/prune', 'require', 'require-for-label',
'require-for-syntax', 'require-for-template', 'set!',
'set!-values', 'syntax', 'syntax-case', 'syntax-case*',
'syntax-id-rules', 'syntax-object->datum', 'syntax-rules',
'syntax/loc', 'time', 'transcript-off', 'transcript-on', 'unless',
'unquote', 'unquote-splicing', 'unsyntax', 'unsyntax-splicing',
'when', 'with-continuation-mark', 'with-handlers',
'with-handlers*', 'with-syntax', 'λ'
]
# From namespace-mapped-symbols
builtins = [
'*', '+', '-', '/', '<', '<=', '=', '>', '>=',
'abort-current-continuation', 'abs', 'absolute-path?', 'acos',
'add1', 'alarm-evt', 'always-evt', 'andmap', 'angle', 'append',
'apply', 'arithmetic-shift', 'arity-at-least',
'arity-at-least-value', 'arity-at-least?', 'asin', 'assoc', 'assq',
'assv', 'atan', 'banner', 'bitwise-and', 'bitwise-bit-field',
'bitwise-bit-set?', 'bitwise-ior', 'bitwise-not', 'bitwise-xor',
'boolean?', 'bound-identifier=?', 'box', 'box-immutable', 'box?',
'break-enabled', 'break-thread', 'build-path',
'build-path/convention-type', 'byte-pregexp', 'byte-pregexp?',
'byte-ready?', 'byte-regexp', 'byte-regexp?', 'byte?', 'bytes',
'bytes->immutable-bytes', 'bytes->list', 'bytes->path',
'bytes->path-element', 'bytes->string/latin-1',
'bytes->string/locale', 'bytes->string/utf-8', 'bytes-append',
'bytes-close-converter', 'bytes-convert', 'bytes-convert-end',
'bytes-converter?', 'bytes-copy', 'bytes-copy!', 'bytes-fill!',
'bytes-length', 'bytes-open-converter', 'bytes-ref', 'bytes-set!',
'bytes-utf-8-index', 'bytes-utf-8-length', 'bytes-utf-8-ref',
'bytes<?', 'bytes=?', 'bytes>?', 'bytes?', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr',
'call-in-nested-thread', 'call-with-break-parameterization',
'call-with-composable-continuation',
'call-with-continuation-barrier', 'call-with-continuation-prompt',
'call-with-current-continuation', 'call-with-escape-continuation',
'call-with-exception-handler',
'call-with-immediate-continuation-mark', 'call-with-input-file',
'call-with-output-file', 'call-with-parameterization',
'call-with-semaphore', 'call-with-semaphore/enable-break',
'call-with-values', 'call/cc', 'call/ec', 'car', 'cdaaar',
'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar',
'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'channel-get', 'channel-put', 'channel-put-evt',
'channel-try-get', 'channel?', 'chaperone-box', 'chaperone-evt',
'chaperone-hash', 'chaperone-of?', 'chaperone-procedure',
'chaperone-struct', 'chaperone-struct-type', 'chaperone-vector',
'chaperone?', 'char->integer', 'char-alphabetic?', 'char-blank?',
'char-ci<=?', 'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?',
'char-downcase', 'char-foldcase', 'char-general-category',
'char-graphic?', 'char-iso-control?', 'char-lower-case?',
'char-numeric?', 'char-punctuation?', 'char-ready?',
'char-symbolic?', 'char-title-case?', 'char-titlecase',
'char-upcase', 'char-upper-case?', 'char-utf-8-length',
'char-whitespace?', 'char<=?', 'char<?', 'char=?', 'char>=?',
'char>?', 'char?', 'check-duplicate-identifier',
'checked-procedure-check-and-extract', 'choice-evt',
'cleanse-path', 'close-input-port', 'close-output-port',
'collect-garbage', 'collection-file-path', 'collection-path',
'compile', 'compile-allow-set!-undefined',
'compile-context-preservation-enabled',
'compile-enforce-module-constants', 'compile-syntax',
'compiled-expression?', 'compiled-module-expression?',
'complete-path?', 'complex?', 'cons',
'continuation-mark-set->context', 'continuation-mark-set->list',
'continuation-mark-set->list*', 'continuation-mark-set-first',
'continuation-mark-set?', 'continuation-marks',
'continuation-prompt-available?', 'continuation-prompt-tag?',
'continuation?', 'copy-file', 'cos',
'current-break-parameterization', 'current-code-inspector',
'current-command-line-arguments', 'current-compile',
'current-continuation-marks', 'current-custodian',
'current-directory', 'current-drive', 'current-error-port',
'current-eval', 'current-evt-pseudo-random-generator',
'current-gc-milliseconds', 'current-get-interaction-input-port',
'current-inexact-milliseconds', 'current-input-port',
'current-inspector', 'current-library-collection-paths',
'current-load', 'current-load-extension',
'current-load-relative-directory', 'current-load/use-compiled',
'current-locale', 'current-memory-use', 'current-milliseconds',
'current-module-declare-name', 'current-module-declare-source',
'current-module-name-resolver', 'current-namespace',
'current-output-port', 'current-parameterization',
'current-preserved-thread-cell-values', 'current-print',
'current-process-milliseconds', 'current-prompt-read',
'current-pseudo-random-generator', 'current-read-interaction',
'current-reader-guard', 'current-readtable', 'current-seconds',
'current-security-guard', 'current-subprocess-custodian-mode',
'current-thread', 'current-thread-group',
'current-thread-initial-stack-size',
'current-write-relative-directory', 'custodian-box-value',
'custodian-box?', 'custodian-limit-memory',
'custodian-managed-list', 'custodian-memory-accounting-available?',
'custodian-require-memory', 'custodian-shutdown-all', 'custodian?',
'custom-print-quotable-accessor', 'custom-print-quotable?',
'custom-write-accessor', 'custom-write?', 'date', 'date*',
'date*-nanosecond', 'date*-time-zone-name', 'date*?', 'date-day',
'date-dst?', 'date-hour', 'date-minute', 'date-month',
'date-second', 'date-time-zone-offset', 'date-week-day',
'date-year', 'date-year-day', 'date?', 'datum-intern-literal',
'default-continuation-prompt-tag', 'delete-directory',
'delete-file', 'denominator', 'directory-exists?',
'directory-list', 'display', 'displayln', 'dump-memory-stats',
'dynamic-require', 'dynamic-require-for-syntax', 'dynamic-wind',
'eof', 'eof-object?', 'ephemeron-value', 'ephemeron?', 'eprintf',
'eq-hash-code', 'eq?', 'equal-hash-code',
'equal-secondary-hash-code', 'equal?', 'equal?/recur',
'eqv-hash-code', 'eqv?', 'error', 'error-display-handler',
'error-escape-handler', 'error-print-context-length',
'error-print-source-location', 'error-print-width',
'error-value->string-handler', 'eval', 'eval-jit-enabled',
'eval-syntax', 'even?', 'evt?', 'exact->inexact', 'exact-integer?',
'exact-nonnegative-integer?', 'exact-positive-integer?', 'exact?',
'executable-yield-handler', 'exit', 'exit-handler', 'exn',
'exn-continuation-marks', 'exn-message', 'exn:break',
'exn:break-continuation', 'exn:break?', 'exn:fail',
'exn:fail:contract', 'exn:fail:contract:arity',
'exn:fail:contract:arity?', 'exn:fail:contract:continuation',
'exn:fail:contract:continuation?',
'exn:fail:contract:divide-by-zero',
'exn:fail:contract:divide-by-zero?',
'exn:fail:contract:non-fixnum-result',
'exn:fail:contract:non-fixnum-result?',
'exn:fail:contract:variable', 'exn:fail:contract:variable-id',
'exn:fail:contract:variable?', 'exn:fail:contract?',
'exn:fail:filesystem', 'exn:fail:filesystem:exists',
'exn:fail:filesystem:exists?', 'exn:fail:filesystem:version',
'exn:fail:filesystem:version?', 'exn:fail:filesystem?',
'exn:fail:network', 'exn:fail:network?', 'exn:fail:out-of-memory',
'exn:fail:out-of-memory?', 'exn:fail:read',
'exn:fail:read-srclocs', 'exn:fail:read:eof', 'exn:fail:read:eof?',
'exn:fail:read:non-char', 'exn:fail:read:non-char?',
'exn:fail:read?', 'exn:fail:syntax', 'exn:fail:syntax-exprs',
'exn:fail:syntax:unbound', 'exn:fail:syntax:unbound?',
'exn:fail:syntax?', 'exn:fail:unsupported',
'exn:fail:unsupported?', 'exn:fail:user', 'exn:fail:user?',
'exn:fail?', 'exn:srclocs-accessor', 'exn:srclocs?', 'exn?', 'exp',
'expand', 'expand-once', 'expand-syntax', 'expand-syntax-once',
'expand-syntax-to-top-form', 'expand-to-top-form',
'expand-user-path', 'expt', 'file-exists?',
'file-or-directory-identity', 'file-or-directory-modify-seconds',
'file-or-directory-permissions', 'file-position', 'file-size',
'file-stream-buffer-mode', 'file-stream-port?',
'filesystem-root-list', 'find-executable-path',
'find-library-collection-paths', 'find-system-path', 'fixnum?',
'floating-point-bytes->real', 'flonum?', 'floor', 'flush-output',
'for-each', 'force', 'format', 'fprintf', 'free-identifier=?',
'gcd', 'generate-temporaries', 'gensym', 'get-output-bytes',
'get-output-string', 'getenv', 'global-port-print-handler',
'guard-evt', 'handle-evt', 'handle-evt?', 'hash', 'hash-equal?',
'hash-eqv?', 'hash-has-key?', 'hash-placeholder?', 'hash-ref!',
'hasheq', 'hasheqv', 'identifier-binding',
'identifier-label-binding', 'identifier-prune-lexical-context',
'identifier-prune-to-source-module',
'identifier-remove-from-definition-context',
'identifier-template-binding', 'identifier-transformer-binding',
'identifier?', 'imag-part', 'immutable?', 'impersonate-box',
'impersonate-hash', 'impersonate-procedure', 'impersonate-struct',
'impersonate-vector', 'impersonator-of?',
'impersonator-prop:application-mark',
'impersonator-property-accessor-procedure?',
'impersonator-property?', 'impersonator?', 'inexact->exact',
'inexact-real?', 'inexact?', 'input-port?', 'inspector?',
'integer->char', 'integer->integer-bytes',
'integer-bytes->integer', 'integer-length', 'integer-sqrt',
'integer-sqrt/remainder', 'integer?',
'internal-definition-context-seal', 'internal-definition-context?',
'keyword->string', 'keyword<?', 'keyword?', 'kill-thread', 'lcm',
'length', 'liberal-define-context?', 'link-exists?', 'list',
'list*', 'list->bytes', 'list->string', 'list->vector', 'list-ref',
'list-tail', 'list?', 'load', 'load-extension',
'load-on-demand-enabled', 'load-relative',
'load-relative-extension', 'load/cd', 'load/use-compiled',
'local-expand', 'local-expand/capture-lifts',
'local-transformer-expand',
'local-transformer-expand/capture-lifts', 'locale-string-encoding',
'log', 'magnitude', 'make-arity-at-least', 'make-bytes',
'make-channel', 'make-continuation-prompt-tag', 'make-custodian',
'make-custodian-box', 'make-date', 'make-date*',
'make-derived-parameter', 'make-directory', 'make-ephemeron',
'make-exn', 'make-exn:break', 'make-exn:fail',
'make-exn:fail:contract', 'make-exn:fail:contract:arity',
'make-exn:fail:contract:continuation',
'make-exn:fail:contract:divide-by-zero',
'make-exn:fail:contract:non-fixnum-result',
'make-exn:fail:contract:variable', 'make-exn:fail:filesystem',
'make-exn:fail:filesystem:exists',
'make-exn:fail:filesystem:version', 'make-exn:fail:network',
'make-exn:fail:out-of-memory', 'make-exn:fail:read',
'make-exn:fail:read:eof', 'make-exn:fail:read:non-char',
'make-exn:fail:syntax', 'make-exn:fail:syntax:unbound',
'make-exn:fail:unsupported', 'make-exn:fail:user',
'make-file-or-directory-link', 'make-hash-placeholder',
'make-hasheq-placeholder', 'make-hasheqv',
'make-hasheqv-placeholder', 'make-immutable-hasheqv',
'make-impersonator-property', 'make-input-port', 'make-inspector',
'make-known-char-range-list', 'make-output-port', 'make-parameter',
'make-pipe', 'make-placeholder', 'make-polar',
'make-prefab-struct', 'make-pseudo-random-generator',
'make-reader-graph', 'make-readtable', 'make-rectangular',
'make-rename-transformer', 'make-resolved-module-path',
'make-security-guard', 'make-semaphore', 'make-set!-transformer',
'make-shared-bytes', 'make-sibling-inspector',
'make-special-comment', 'make-srcloc', 'make-string',
'make-struct-field-accessor', 'make-struct-field-mutator',
'make-struct-type', 'make-struct-type-property',
'make-syntax-delta-introducer', 'make-syntax-introducer',
'make-thread-cell', 'make-thread-group', 'make-vector',
'make-weak-box', 'make-weak-hasheqv', 'make-will-executor', 'map',
'max', 'mcar', 'mcdr', 'mcons', 'member', 'memq', 'memv', 'min',
'module->exports', 'module->imports', 'module->language-info',
'module->namespace', 'module-compiled-exports',
'module-compiled-imports', 'module-compiled-language-info',
'module-compiled-name', 'module-path-index-join',
'module-path-index-resolve', 'module-path-index-split',
'module-path-index?', 'module-path?', 'module-predefined?',
'module-provide-protected?', 'modulo', 'mpair?', 'nack-guard-evt',
'namespace-attach-module', 'namespace-attach-module-declaration',
'namespace-base-phase', 'namespace-mapped-symbols',
'namespace-module-identifier', 'namespace-module-registry',
'namespace-require', 'namespace-require/constant',
'namespace-require/copy', 'namespace-require/expansion-time',
'namespace-set-variable-value!', 'namespace-symbol->identifier',
'namespace-syntax-introduce', 'namespace-undefine-variable!',
'namespace-unprotect-module', 'namespace-variable-value',
'namespace?', 'negative?', 'never-evt', 'newline',
'normal-case-path', 'not', 'null', 'null?', 'number->string',
'number?', 'numerator', 'object-name', 'odd?', 'open-input-bytes',
'open-input-file', 'open-input-output-file', 'open-input-string',
'open-output-bytes', 'open-output-file', 'open-output-string',
'ormap', 'output-port?', 'pair?', 'parameter-procedure=?',
'parameter?', 'parameterization?', 'path->bytes',
'path->complete-path', 'path->directory-path', 'path->string',
'path-add-suffix', 'path-convention-type', 'path-element->bytes',
'path-element->string', 'path-for-some-system?',
'path-list-string->path-list', 'path-replace-suffix',
'path-string?', 'path?', 'peek-byte', 'peek-byte-or-special',
'peek-bytes', 'peek-bytes!', 'peek-bytes-avail!',
'peek-bytes-avail!*', 'peek-bytes-avail!/enable-break',
'peek-char', 'peek-char-or-special', 'peek-string', 'peek-string!',
'pipe-content-length', 'placeholder-get', 'placeholder-set!',
'placeholder?', 'poll-guard-evt', 'port-closed-evt',
'port-closed?', 'port-commit-peeked', 'port-count-lines!',
'port-count-lines-enabled', 'port-display-handler',
'port-file-identity', 'port-file-unlock', 'port-next-location',
'port-print-handler', 'port-progress-evt',
'port-provides-progress-evts?', 'port-read-handler',
'port-try-file-lock?', 'port-write-handler', 'port-writes-atomic?',
'port-writes-special?', 'port?', 'positive?',
'prefab-key->struct-type', 'prefab-struct-key', 'pregexp',
'pregexp?', 'primitive-closure?', 'primitive-result-arity',
'primitive?', 'print', 'print-as-expression',
'print-boolean-long-form', 'print-box', 'print-graph',
'print-hash-table', 'print-mpair-curly-braces',
'print-pair-curly-braces', 'print-reader-abbreviations',
'print-struct', 'print-syntax-width', 'print-unreadable',
'print-vector-length', 'printf', 'procedure->method',
'procedure-arity', 'procedure-arity-includes?', 'procedure-arity?',
'procedure-closure-contents-eq?', 'procedure-extract-target',
'procedure-reduce-arity', 'procedure-rename',
'procedure-struct-type?', 'procedure?', 'promise?',
'prop:arity-string', 'prop:checked-procedure',
'prop:custom-print-quotable', 'prop:custom-write',
'prop:equal+hash', 'prop:evt', 'prop:exn:srclocs',
'prop:impersonator-of', 'prop:input-port',
'prop:liberal-define-context', 'prop:output-port',
'prop:procedure', 'prop:rename-transformer',
'prop:set!-transformer', 'pseudo-random-generator->vector',
'pseudo-random-generator-vector?', 'pseudo-random-generator?',
'putenv', 'quotient', 'quotient/remainder', 'raise',
'raise-arity-error', 'raise-mismatch-error', 'raise-syntax-error',
'raise-type-error', 'raise-user-error', 'random', 'random-seed',
'rational?', 'rationalize', 'read', 'read-accept-bar-quote',
'read-accept-box', 'read-accept-compiled', 'read-accept-dot',
'read-accept-graph', 'read-accept-infix-dot', 'read-accept-lang',
'read-accept-quasiquote', 'read-accept-reader', 'read-byte',
'read-byte-or-special', 'read-bytes', 'read-bytes!',
'read-bytes-avail!', 'read-bytes-avail!*',
'read-bytes-avail!/enable-break', 'read-bytes-line',
'read-case-sensitive', 'read-char', 'read-char-or-special',
'read-curly-brace-as-paren', 'read-decimal-as-inexact',
'read-eval-print-loop', 'read-language', 'read-line',
'read-on-demand-source', 'read-square-bracket-as-paren',
'read-string', 'read-string!', 'read-syntax',
'read-syntax/recursive', 'read/recursive', 'readtable-mapping',
'readtable?', 'real->double-flonum', 'real->floating-point-bytes',
'real->single-flonum', 'real-part', 'real?', 'regexp',
'regexp-match', 'regexp-match-peek', 'regexp-match-peek-immediate',
'regexp-match-peek-positions',
'regexp-match-peek-positions-immediate',
'regexp-match-peek-positions-immediate/end',
'regexp-match-peek-positions/end', 'regexp-match-positions',
'regexp-match-positions/end', 'regexp-match/end', 'regexp-match?',
'regexp-max-lookbehind', 'regexp-replace', 'regexp-replace*',
'regexp?', 'relative-path?', 'remainder',
'rename-file-or-directory', 'rename-transformer-target',
'rename-transformer?', 'resolve-path', 'resolved-module-path-name',
'resolved-module-path?', 'reverse', 'round', 'seconds->date',
'security-guard?', 'semaphore-peek-evt', 'semaphore-post',
'semaphore-try-wait?', 'semaphore-wait',
'semaphore-wait/enable-break', 'semaphore?',
'set!-transformer-procedure', 'set!-transformer?', 'set-box!',
'set-mcar!', 'set-mcdr!', 'set-port-next-location!',
'shared-bytes', 'shell-execute', 'simplify-path', 'sin',
'single-flonum?', 'sleep', 'special-comment-value',
'special-comment?', 'split-path', 'sqrt', 'srcloc',
'srcloc-column', 'srcloc-line', 'srcloc-position', 'srcloc-source',
'srcloc-span', 'srcloc?', 'string', 'string->bytes/latin-1',
'string->bytes/locale', 'string->bytes/utf-8',
'string->immutable-string', 'string->keyword', 'string->list',
'string->number', 'string->path', 'string->path-element',
'string->symbol', 'string->uninterned-symbol',
'string->unreadable-symbol', 'string-append', 'string-ci<=?',
'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?',
'string-copy', 'string-copy!', 'string-downcase', 'string-fill!',
'string-foldcase', 'string-length', 'string-locale-ci<?',
'string-locale-ci=?', 'string-locale-ci>?',
'string-locale-downcase', 'string-locale-upcase',
'string-locale<?', 'string-locale=?', 'string-locale>?',
'string-normalize-nfc', 'string-normalize-nfd',
'string-normalize-nfkc', 'string-normalize-nfkd', 'string-ref',
'string-set!', 'string-titlecase', 'string-upcase',
'string-utf-8-length', 'string<=?', 'string<?', 'string=?',
'string>=?', 'string>?', 'string?', 'struct->vector',
'struct-accessor-procedure?', 'struct-constructor-procedure?',
'struct-info', 'struct-mutator-procedure?',
'struct-predicate-procedure?', 'struct-type-info',
'struct-type-make-constructor', 'struct-type-make-predicate',
'struct-type-property-accessor-procedure?',
'struct-type-property?', 'struct-type?', 'struct:arity-at-least',
'struct:date', 'struct:date*', 'struct:exn', 'struct:exn:break',
'struct:exn:fail', 'struct:exn:fail:contract',
'struct:exn:fail:contract:arity',
'struct:exn:fail:contract:continuation',
'struct:exn:fail:contract:divide-by-zero',
'struct:exn:fail:contract:non-fixnum-result',
'struct:exn:fail:contract:variable', 'struct:exn:fail:filesystem',
'struct:exn:fail:filesystem:exists',
'struct:exn:fail:filesystem:version', 'struct:exn:fail:network',
'struct:exn:fail:out-of-memory', 'struct:exn:fail:read',
'struct:exn:fail:read:eof', 'struct:exn:fail:read:non-char',
'struct:exn:fail:syntax', 'struct:exn:fail:syntax:unbound',
'struct:exn:fail:unsupported', 'struct:exn:fail:user',
'struct:srcloc', 'struct?', 'sub1', 'subbytes', 'subprocess',
'subprocess-group-enabled', 'subprocess-kill', 'subprocess-pid',
'subprocess-status', 'subprocess-wait', 'subprocess?', 'substring',
'symbol->string', 'symbol-interned?', 'symbol-unreadable?',
'symbol?', 'sync', 'sync/enable-break', 'sync/timeout',
'sync/timeout/enable-break', 'syntax->list', 'syntax-arm',
'syntax-column', 'syntax-disarm', 'syntax-e', 'syntax-line',
'syntax-local-bind-syntaxes', 'syntax-local-certifier',
'syntax-local-context', 'syntax-local-expand-expression',
'syntax-local-get-shadower', 'syntax-local-introduce',
'syntax-local-lift-context', 'syntax-local-lift-expression',
'syntax-local-lift-module-end-declaration',
'syntax-local-lift-provide', 'syntax-local-lift-require',
'syntax-local-lift-values-expression',
'syntax-local-make-definition-context',
'syntax-local-make-delta-introducer',
'syntax-local-module-defined-identifiers',
'syntax-local-module-exports',
'syntax-local-module-required-identifiers', 'syntax-local-name',
'syntax-local-phase-level',
'syntax-local-transforming-module-provides?', 'syntax-local-value',
'syntax-local-value/immediate', 'syntax-original?',
'syntax-position', 'syntax-property',
'syntax-property-symbol-keys', 'syntax-protect', 'syntax-rearm',
'syntax-recertify', 'syntax-shift-phase-level', 'syntax-source',
'syntax-source-module', 'syntax-span', 'syntax-taint',
'syntax-tainted?', 'syntax-track-origin',
'syntax-transforming-module-expression?', 'syntax-transforming?',
'syntax?', 'system-big-endian?', 'system-idle-evt',
'system-language+country', 'system-library-subpath',
'system-path-convention-type', 'system-type', 'tan',
'tcp-abandon-port', 'tcp-accept', 'tcp-accept-evt',
'tcp-accept-ready?', 'tcp-accept/enable-break', 'tcp-addresses',
'tcp-close', 'tcp-connect', 'tcp-connect/enable-break',
'tcp-listen', 'tcp-listener?', 'tcp-port?', 'terminal-port?',
'thread', 'thread-cell-ref', 'thread-cell-set!', 'thread-cell?',
'thread-dead-evt', 'thread-dead?', 'thread-group?',
'thread-resume', 'thread-resume-evt', 'thread-rewind-receive',
'thread-running?', 'thread-suspend', 'thread-suspend-evt',
'thread-wait', 'thread/suspend-to-kill', 'thread?', 'time-apply',
'truncate', 'udp-addresses', 'udp-bind!', 'udp-bound?',
'udp-close', 'udp-connect!', 'udp-connected?', 'udp-open-socket',
'udp-receive!', 'udp-receive!*', 'udp-receive!-evt',
'udp-receive!/enable-break', 'udp-receive-ready-evt', 'udp-send',
'udp-send*', 'udp-send-evt', 'udp-send-ready-evt', 'udp-send-to',
'udp-send-to*', 'udp-send-to-evt', 'udp-send-to/enable-break',
'udp-send/enable-break', 'udp?', 'unbox',
'uncaught-exception-handler', 'use-collection-link-paths',
'use-compiled-file-paths', 'use-user-specific-search-paths',
'values', 'variable-reference->empty-namespace',
'variable-reference->module-base-phase',
'variable-reference->module-declaration-inspector',
'variable-reference->module-source',
'variable-reference->namespace', 'variable-reference->phase',
'variable-reference->resolved-module-path',
'variable-reference-constant?', 'variable-reference?', 'vector',
'vector->immutable-vector', 'vector->list',
'vector->pseudo-random-generator',
'vector->pseudo-random-generator!', 'vector->values',
'vector-fill!', 'vector-immutable', 'vector-length', 'vector-ref',
'vector-set!', 'vector-set-performance-stats!', 'vector?',
'version', 'void', 'void?', 'weak-box-value', 'weak-box?',
'will-execute', 'will-executor?', 'will-register',
'will-try-execute', 'with-input-from-file', 'with-output-to-file',
'wrap-evt', 'write', 'write-byte', 'write-bytes',
'write-bytes-avail', 'write-bytes-avail*', 'write-bytes-avail-evt',
'write-bytes-avail/enable-break', 'write-char', 'write-special',
'write-special-avail*', 'write-special-evt', 'write-string', 'zero?'
]
# From SchemeLexer
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+'
tokens = {
'root' : [
(r';.*$', Comment.Single),
(r'#\|[^|]+\|#', Comment.Multiline),
# whitespaces - usually not relevant
(r'\s+', Text),
## numbers: Keep in mind Racket reader hash prefixes,
## which can denote the base or the type. These don't map
## neatly onto pygments token types; some judgment calls
## here. Note that none of these regexps attempt to
## exclude identifiers that start with a number, such as a
## variable named "100-Continue".
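            ## For example: #b1011 (binary), #o644 (octal), #d99 (decimal),
            ## #x7f (hex), #e1/3 (exact rational), #i3.14 (inexact float).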
# #b
(r'#b[-+]?[01]+\.[01]+', Number.Float),
(r'#b[01]+e[-+]?[01]+', Number.Float),
            (r'#b[-+]?[01]+/[01]+', Number),
(r'#b[-+]?[01]+', Number.Integer),
(r'#b\S*', Error),
# #d OR no hash prefix
(r'(#d)?[-+]?\d+\.\d+', Number.Float),
(r'(#d)?\d+e[-+]?\d+', Number.Float),
(r'(#d)?[-+]?\d+/\d+', Number),
(r'(#d)?[-+]?\d+', Number.Integer),
(r'#d\S*', Error),
# #e
(r'#e[-+]?\d+\.\d+', Number.Float),
(r'#e\d+e[-+]?\d+', Number.Float),
(r'#e[-+]?\d+/\d+', Number),
(r'#e[-+]?\d+', Number),
(r'#e\S*', Error),
# #i is always inexact-real, i.e. float
(r'#i[-+]?\d+\.\d+', Number.Float),
(r'#i\d+e[-+]?\d+', Number.Float),
(r'#i[-+]?\d+/\d+', Number.Float),
(r'#i[-+]?\d+', Number.Float),
(r'#i\S*', Error),
# #o
(r'#o[-+]?[0-7]+\.[0-7]+', Number.Oct),
(r'#o[0-7]+e[-+]?[0-7]+', Number.Oct),
(r'#o[-+]?[0-7]+/[0-7]+', Number.Oct),
(r'#o[-+]?[0-7]+', Number.Oct),
(r'#o\S*', Error),
# #x
(r'#x[-+]?[0-9a-fA-F]+\.[0-9a-fA-F]+', Number.Hex),
            # an exponent variation (e.g. #x1e1) is not applicable here,
            # since 'e' is itself a hex digit
(r'#x[-+]?[0-9a-fA-F]+/[0-9a-fA-F]+', Number.Hex),
(r'#x[-+]?[0-9a-fA-F]+', Number.Hex),
(r'#x\S*', Error),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char),
(r'#rx".+"', String.Regex),
(r'#px".+"', String.Regex),
# constants
(r'(#t|#f)', Name.Constant),
# keyword argument names (e.g. #:keyword)
(r'#:\S+', Keyword.Declaration),
# #lang
(r'#lang \S+', Keyword.Namespace),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions; handle both ( and [
(r'(?<=(\(|\[|\{))' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\)|\[|\]|\{|\})', Punctuation),
],
}
class SchemeLexer(RegexLexer):
"""
    A Scheme lexer, parsing a stream and outputting the tokens
    needed to highlight Scheme code.
    It should be fairly easy to subclass this lexer to parse other
    Lisp dialects such as Common Lisp, Emacs Lisp or AutoLisp.
    This parser is checked against pastes from the Lisp pastebin
    at http://paste.lisp.org/ to cover as much syntax as possible.
It supports the full Scheme syntax as defined in R5RS.
*New in Pygments 0.6.*
"""
name = 'Scheme'
aliases = ['scheme', 'scm']
filenames = ['*.scm', '*.ss']
mimetypes = ['text/x-scheme', 'application/x-scheme']
    # list of known keywords and builtins taken from the vim 6.4 scheme.vim
# syntax file.
keywords = [
'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'case', 'let',
'let*', 'letrec', 'begin', 'do', 'delay', 'set!', '=>', 'quote',
'quasiquote', 'unquote', 'unquote-splicing', 'define-syntax',
'let-syntax', 'letrec-syntax', 'syntax-rules'
]
builtins = [
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', 'abs', 'acos', 'angle',
'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan',
'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr',
'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr',
'cadr', 'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr',
'cdr', 'ceiling', 'char->integer', 'char-alphabetic?', 'char-ci<=?',
'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase',
'char-upper-case?', 'char-whitespace?', 'char<=?', 'char<?', 'char=?',
'char>=?', 'char>?', 'char?', 'close-input-port', 'close-output-port',
'complex?', 'cons', 'cos', 'current-input-port', 'current-output-port',
'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?',
'equal?', 'eqv?', 'eval', 'even?', 'exact->inexact', 'exact?', 'exp',
'expt', 'floor', 'for-each', 'force', 'gcd', 'imag-part',
'inexact->exact', 'inexact?', 'input-port?', 'integer->char',
'integer?', 'interaction-environment', 'lcm', 'length', 'list',
'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?',
'load', 'log', 'magnitude', 'make-polar', 'make-rectangular',
'make-string', 'make-vector', 'map', 'max', 'member', 'memq', 'memv',
'min', 'modulo', 'negative?', 'newline', 'not', 'null-environment',
'null?', 'number->string', 'number?', 'numerator', 'odd?',
'open-input-file', 'open-output-file', 'output-port?', 'pair?',
'peek-char', 'port?', 'positive?', 'procedure?', 'quotient',
'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?',
'remainder', 'reverse', 'round', 'scheme-report-environment',
'set-car!', 'set-cdr!', 'sin', 'sqrt', 'string', 'string->list',
'string->number', 'string->symbol', 'string-append', 'string-ci<=?',
'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?',
'string-copy', 'string-fill!', 'string-length', 'string-ref',
'string-set!', 'string<=?', 'string<?', 'string=?', 'string>=?',
'string>?', 'string?', 'substring', 'symbol->string', 'symbol?',
'tan', 'transcript-off', 'transcript-on', 'truncate', 'values',
'vector', 'vector->list', 'vector-fill!', 'vector-length',
'vector-ref', 'vector-set!', 'vector?', 'with-input-from-file',
'with-output-to-file', 'write', 'write-char', 'zero?'
]
# valid names for identifiers
    # strictly speaking, a name may not consist entirely of digits,
    # but this approximation is good enough for now
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+'
tokens = {
'root' : [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
            # support for uncommon kinds of numbers is still missing -
            # the hash prefixes (#e, #i, #b, #o, #d, #x) denote exactness
            # or base, as in the Racket rules above
#(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\))', Punctuation),
(r'(\[|\])', Punctuation),
],
}
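# A minimal usage sketch (standard pygments API; the input string is made up):
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight("(define (square x) (* x x))",
#                   SchemeLexer(), HtmlFormatter()))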
class CommonLispLexer(RegexLexer):
"""
A Common Lisp lexer.
*New in Pygments 0.9.*
"""
name = 'Common Lisp'
aliases = ['common-lisp', 'cl', 'lisp']
filenames = ['*.cl', '*.lisp', '*.el'] # use for Elisp too
mimetypes = ['text/x-common-lisp']
flags = re.IGNORECASE | re.MULTILINE
### couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[a-zA-Z0-9!$%&*+-/<=>?@\[\]^_{}~]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters
### symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent)
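    # e.g. this matches plain names like list->vector, earmuffed globals
    # like *print-base*, and pipe-quoted symbols like |one symbol|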
def __init__(self, **options):
from pygments.lexers._clbuiltins import BUILTIN_FUNCTIONS, \
SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
BUILTIN_TYPES, BUILTIN_CLASSES
self.builtin_function = BUILTIN_FUNCTIONS
self.special_forms = SPECIAL_FORMS
self.macros = MACROS
self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
self.declarations = DECLARATIONS
self.builtin_types = BUILTIN_TYPES
self.builtin_classes = BUILTIN_CLASSES
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in self.builtin_function:
yield index, Name.Builtin, value
continue
if value in self.special_forms:
yield index, Keyword, value
continue
if value in self.macros:
yield index, Name.Builtin, value
continue
if value in self.lambda_list_keywords:
yield index, Keyword, value
continue
if value in self.declarations:
yield index, Keyword, value
continue
if value in self.builtin_types:
yield index, Keyword.Type, value
continue
if value in self.builtin_classes:
yield index, Name.Class, value
continue
yield index, token, value
tokens = {
'root' : [
('', Text, 'body'),
],
'multiline-comment' : [
(r'#\|', Comment.Multiline, '#push'), # (cf. Hyperspec 2.4.8.19)
(r'\|#', Comment.Multiline, '#pop'),
(r'[^|#]+', Comment.Multiline),
(r'[|#]', Comment.Multiline),
],
'commented-form' : [
(r'\(', Comment.Preproc, '#push'),
(r'\)', Comment.Preproc, '#pop'),
(r'[^()]+', Comment.Preproc),
],
'body' : [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# multi-line comment
(r'#\|', Comment.Multiline, 'multiline-comment'),
# encoding comment (?)
(r'#\d*Y.*$', Comment.Special),
# strings and characters
(r'"(\\.|\\\n|[^"\\])*"', String),
# quoting
(r":" + symbol, String.Symbol),
(r"::" + symbol, String.Symbol),
(r":#" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' \
+ terminated, Number.Float),
# sharpsign strings and characters
(r"#\\." + terminated, String.Char),
(r"#\\" + symbol, String.Char),
# vector
(r'#\(', Operator, 'body'),
# bitstring
(r'#\d*\*[01]*', Literal.Other),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read-time and load-time evaluation
(r'#[.,]', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#[bB][+-]?[01]+(/[01]+)?', Number),
# octal rational
(r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex),
# radix rational
(r'#\d+[rR][+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number),
# complex
(r'(#[cC])(\()', bygroups(Number, Punctuation), 'body'),
# array
(r'(#\d+[aA])(\()', bygroups(Literal.Other, Punctuation), 'body'),
# structure
(r'(#[sS])(\()', bygroups(Literal.Other, Punctuation), 'body'),
# path
(r'#[pP]?"(\\.|[^"])*"', Literal.Other),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# read-time comment
            (r'#+nil' + terminated + r'\s*\(', Comment.Preproc, 'commented-form'),
# read-time conditional
(r'#[+-]', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
            (r'\*' + symbol + r'\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
}
class HaskellLexer(RegexLexer):
"""
A Haskell lexer based on the lexemes defined in the Haskell 98 Report.
*New in Pygments 0.8.*
"""
name = 'Haskell'
aliases = ['haskell', 'hs']
filenames = ['*.hs']
mimetypes = ['text/x-haskell']
reserved = ['case','class','data','default','deriving','do','else',
'if','in','infix[lr]?','instance',
'let','newtype','of','then','type','where','_']
ascii = ['NUL','SOH','[SE]TX','EOT','ENQ','ACK',
'BEL','BS','HT','LF','VT','FF','CR','S[OI]','DLE',
'DC[1-4]','NAK','SYN','ETB','CAN',
'EM','SUB','ESC','[FGRU]S','SP','DEL']
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
#(r'--\s*|.*$', Comment.Doc),
(r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'^[_a-z][\w\']*', Name.Function),
(r"'?[_a-z][\w']*", Name),
(r"('')?[A-Z][\w\']*", Keyword.Type),
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Text),
(r'"', String, 'string'),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(as)(\s+)([A-Z][a-zA-Z0-9_.]*)',
bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),
# import X hiding (functions)
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),
# import X (functions)
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
# import X
(r'[a-zA-Z0-9_.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Text),
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
(r'[A-Z][a-zA-Z0-9_.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Text),
(r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
(r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
(r'--.*$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
# NOTE: the next four states are shared in the AgdaLexer; make sure
# any change is compatible with Agda as well or copy over and change
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
(r'{-', Comment.Multiline, '#push'),
(r'-}', Comment.Multiline, '#pop'),
(r'[-{}]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']", String.Char),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@\^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'\s+\\', String.Escape, '#pop'),
],
}
class AgdaLexer(RegexLexer):
"""
For the `Agda <http://wiki.portal.chalmers.se/agda/pmwiki.php>`_
dependently typed functional programming language and proof assistant.
*New in Pygments 1.7.*
"""
name = 'Agda'
aliases = ['agda']
filenames = ['*.agda']
mimetypes = ['text/x-agda']
reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data',
'field', 'forall', 'hiding', 'in', 'inductive', 'infix',
'infixl', 'infixr', 'let', 'open', 'pattern', 'primitive',
'private', 'mutual', 'quote', 'quoteGoal', 'quoteTerm',
'record', 'syntax', 'rewrite', 'unquote', 'using', 'where',
'with']
tokens = {
'root': [
# Declaration
(r'^(\s*)([^\s\(\)\{\}]+)(\s*)(:)(\s*)',
bygroups(Text, Name.Function, Text, Operator.Word, Text)),
# Comments
(r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
# Holes
(r'{!', Comment.Directive, 'hole'),
# Lexemes:
# Identifiers
(ur'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
(r'\b(Set|Prop)\b', Keyword.Type),
# Special Symbols
(r'(\(|\)|\{|\})', Operator),
(ur'(\.{1,3}|\||[\u039B]|[\u2200]|[\u2192]|:|=|->)', Operator.Word),
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Strings
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
(r'[^\s\(\)\{\}]+', Text),
            (r'\s+', Text),  # Whitespace
],
'hole': [
# Holes
(r'[^!{}]+', Comment.Directive),
(r'{!', Comment.Directive, '#push'),
(r'!}', Comment.Directive, '#pop'),
(r'[!{}]', Comment.Directive),
],
'module': [
(r'{-', Comment.Multiline, 'comment'),
(r'[a-zA-Z][a-zA-Z0-9_.]*', Name, '#pop'),
(r'[^a-zA-Z]*', Text)
],
'comment': HaskellLexer.tokens['comment'],
'character': HaskellLexer.tokens['character'],
'string': HaskellLexer.tokens['string'],
'escape': HaskellLexer.tokens['escape']
}
class LiterateLexer(Lexer):
"""
Base class for lexers of literate file formats based on LaTeX or Bird-style
(prefixing each code line with ">").
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
"""
bird_re = re.compile(r'(>[ \t]*)(.*\n)')
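    # e.g. for "> main = foo\n", group(1) is the "> " marker and
    # group(2) is the code that follows it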
def __init__(self, baselexer, **options):
self.baselexer = baselexer
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
style = self.options.get('litstyle')
if style is None:
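            # old-style conditional expression: 'latex' if the first
            # non-whitespace character is '%' or '\', else 'bird'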
style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird'
code = ''
insertions = []
if style == 'bird':
# bird-style
for match in line_re.finditer(text):
line = match.group()
m = self.bird_re.match(line)
if m:
insertions.append((len(code),
[(0, Comment.Special, m.group(1))]))
code += m.group(2)
else:
insertions.append((len(code), [(0, Text, line)]))
else:
# latex-style
from pygments.lexers.text import TexLexer
lxlexer = TexLexer(**self.options)
codelines = 0
latex = ''
for match in line_re.finditer(text):
line = match.group()
if codelines:
if line.lstrip().startswith('\\end{code}'):
codelines = 0
latex += line
else:
code += line
elif line.lstrip().startswith('\\begin{code}'):
codelines = 1
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
latex = ''
else:
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)):
yield item
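# For reference, a Bird-style literate source marks each code line with a
# leading ">" (a made-up sketch):
#
#   Squaring is multiplication by itself.
#   > square :: Int -> Int
#   > square x = x * x
#
# LaTeX-style sources wrap code in \begin{code} ... \end{code} blocks instead.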
class LiterateHaskellLexer(LiterateLexer):
"""
For Literate Haskell (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
*New in Pygments 0.9.*
"""
name = 'Literate Haskell'
aliases = ['lhs', 'literate-haskell', 'lhaskell']
filenames = ['*.lhs']
mimetypes = ['text/x-literate-haskell']
def __init__(self, **options):
hslexer = HaskellLexer(**options)
LiterateLexer.__init__(self, hslexer, **options)
class LiterateAgdaLexer(LiterateLexer):
"""
For Literate Agda source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
*New in Pygments 1.7.*
"""
name = 'Literate Agda'
aliases = ['lagda', 'literate-agda']
filenames = ['*.lagda']
mimetypes = ['text/x-literate-agda']
def __init__(self, **options):
agdalexer = AgdaLexer(**options)
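        # literate Agda is LaTeX-based, so the style is forced here
        # rather than autodetected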
LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options)
class SMLLexer(RegexLexer):
"""
For the Standard ML language.
*New in Pygments 1.5.*
"""
name = 'Standard ML'
aliases = ['sml']
filenames = ['*.sml', '*.sig', '*.fun',]
mimetypes = ['text/x-standardml', 'application/x-standardml']
alphanumid_reserved = [
# Core
'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else',
'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix',
'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse',
'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while',
# Modules
'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature',
'struct', 'structure', 'where',
]
symbolicid_reserved = [
# Core
        # plain strings (compared against matched text, not used as regexes)
        ':', '|', '=', '=>', '->', '#',
# Modules
':>',
]
nonid_reserved = [ '(', ')', '[', ']', '{', '}', ',', ';', '...', '_' ]
alphanumid_re = r"[a-zA-Z][a-zA-Z0-9_']*"
symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+"
# A character constant is a sequence of the form #s, where s is a string
# constant denoting a string of size one character. This setup just parses
# the entire string as either a String.Double or a String.Char (depending
    # on the argument), even if the String.Char is an erroneous
    # multiple-character string.
    def stringy(whatkind):
return [
(r'[^"\\]', whatkind),
(r'\\[\\\"abtnvfr]', String.Escape),
# Control-character notation is used for codes < 32,
# where \^@ == \000
(r'\\\^[\x40-\x5e]', String.Escape),
# Docs say 'decimal digits'
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\\s+\\', String.Interpol),
(r'"', whatkind, '#pop'),
]
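    # e.g. inside a string literal, "\n", "\097" and "\u0041" are escapes,
    # and a backslash-delimited whitespace gap like "\   \" is String.Interpol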
# Callbacks for distinguishing tokens and reserved words
def long_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved: token = Error
else: token = Name.Namespace
yield match.start(1), token, match.group(1)
yield match.start(2), Punctuation, match.group(2)
def end_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved: token = Error
elif match.group(1) in self.symbolicid_reserved: token = Error
else: token = Name
yield match.start(1), token, match.group(1)
def id_callback(self, match):
str = match.group(1)
if str in self.alphanumid_reserved: token = Keyword.Reserved
elif str in self.symbolicid_reserved: token = Punctuation
else: token = Name
yield match.start(1), token, str
tokens = {
# Whitespace and comments are (almost) everywhere
'whitespace': [
(r'\s+', Text),
(r'\(\*', Comment.Multiline, 'comment'),
],
'delimiters': [
# This lexer treats these delimiters specially:
# Delimiters define scopes, and the scope is how the meaning of
# the `|' is resolved - is it a case/handle expression, or function
# definition by cases? (This is not how the Definition works, but
# it's how MLton behaves, see http://mlton.org/SMLNJDeviations)
(r'\(|\[|{', Punctuation, 'main'),
(r'\)|\]|}', Punctuation, '#pop'),
(r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')),
(r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'),
(r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'),
],
'core': [
# Punctuation that doesn't overlap symbolic identifiers
(r'(%s)' % '|'.join([re.escape(z) for z in nonid_reserved]),
Punctuation),
# Special constants: strings, floats, numbers in decimal and hex
(r'#"', String.Char, 'char'),
(r'"', String.Double, 'string'),
(r'~?0x[0-9a-fA-F]+', Number.Hex),
(r'0wx[0-9a-fA-F]+', Number.Hex),
(r'0w\d+', Number.Integer),
(r'~?\d+\.\d+[eE]~?\d+', Number.Float),
(r'~?\d+\.\d+', Number.Float),
(r'~?\d+[eE]~?\d+', Number.Float),
(r'~?\d+', Number.Integer),
# Labels
(r'#\s*[1-9][0-9]*', Name.Label),
(r'#\s*(%s)' % alphanumid_re, Name.Label),
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
            (r'(?=\b(exception)\b(?!\'))', Text, 'ename'),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
# Regular identifiers, long and otherwise
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"),
(r'(%s)' % alphanumid_re, id_callback),
(r'(%s)' % symbolicid_re, id_callback),
],
'dotted': [
(r'(%s)(\.)' % alphanumid_re, long_id_callback),
(r'(%s)' % alphanumid_re, end_id_callback, "#pop"),
(r'(%s)' % symbolicid_re, end_id_callback, "#pop"),
(r'\s+', Error),
(r'\S+', Error),
],
# Main parser (prevents errors in files that have scoping errors)
'root': [ (r'', Text, 'main') ],
# In this scope, I expect '|' to not be followed by a function name,
# and I expect 'and' to be followed by a binding site
'main': [
include('whitespace'),
# Special behavior of val/and/fun
(r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'),
(r'\b(fun)\b(?!\')', Keyword.Reserved,
('#pop', 'main-fun', 'fname')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# In this scope, I expect '|' and 'and' to be followed by a function
'main-fun': [
include('whitespace'),
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
# Special behavior of val/and/fun
(r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'),
(r'\b(val)\b(?!\')', Keyword.Reserved,
('#pop', 'main', 'vname')),
# Special behavior of '|' and '|'-manipulating keywords
(r'\|', Punctuation, 'fname'),
(r'\b(case|handle)\b(?!\')', Keyword.Reserved,
('#pop', 'main')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# Character and string parsers
'char': stringy(String.Char),
'string': stringy(String.Double),
'breakout': [
(r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'),
],
# Dealing with what comes after module system keywords
'sname': [
include('whitespace'),
include('breakout'),
(r'(%s)' % alphanumid_re, Name.Namespace),
(r'', Text, '#pop'),
],
# Dealing with what comes after the 'fun' (or 'and' or '|') keyword
'fname': [
include('whitespace'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)' % alphanumid_re, Name.Function, '#pop'),
(r'(%s)' % symbolicid_re, Name.Function, '#pop'),
# Ignore interesting function declarations like "fun (x + y) = ..."
(r'', Text, '#pop'),
],
# Dealing with what comes after the 'val' (or 'and') keyword
'vname': [
include('whitespace'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)' % alphanumid_re, Name.Variable, '#pop'),
(r'(%s)' % symbolicid_re, Name.Variable, '#pop'),
# Ignore interesting patterns like 'val (x, y)'
(r'', Text, '#pop'),
],
# Dealing with what comes after the 'type' (or 'and') keyword
'tname': [
include('whitespace'),
include('breakout'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# A type binding includes most identifiers
'typbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
include('breakout'),
include('core'),
(r'\S+', Error, '#pop'),
],
# Dealing with what comes after the 'datatype' (or 'and') keyword
'dname': [
include('whitespace'),
include('breakout'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(=)(\s*)(datatype)',
bygroups(Punctuation, Text, Keyword.Reserved), '#pop'),
(r'=(?!%s)' % symbolicid_re, Punctuation,
('#pop', 'datbind', 'datcon')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# common case - A | B | C of int
'datbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')),
(r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
(r'\b(of)\b(?!\')', Keyword.Reserved),
(r'(\|)(\s*)(%s)' % alphanumid_re,
bygroups(Punctuation, Text, Name.Class)),
(r'(\|)(\s+)(%s)' % symbolicid_re,
bygroups(Punctuation, Text, Name.Class)),
include('breakout'),
include('core'),
(r'\S+', Error),
],
# Dealing with what comes after an exception
'ename': [
include('whitespace'),
(r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
include('breakout'),
include('core'),
(r'\S+', Error),
],
'datcon': [
include('whitespace'),
(r'(%s)' % alphanumid_re, Name.Class, '#pop'),
(r'(%s)' % symbolicid_re, Name.Class, '#pop'),
(r'\S+', Error, '#pop'),
],
# Series of type variables
'tyvarseq': [
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(alphanumid_re, Name),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
(symbolicid_re, Name),
],
'comment': [
(r'[^(*)]', Comment.Multiline),
(r'\(\*', Comment.Multiline, '#push'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[(*)]', Comment.Multiline),
],
}
class OcamlLexer(RegexLexer):
"""
For the OCaml language.
*New in Pygments 0.7.*
"""
name = 'OCaml'
aliases = ['ocaml']
filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
mimetypes = ['text/x-ocaml']
keywords = [
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
'downto', 'else', 'end', 'exception', 'external', 'false',
'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'value', 'val', 'virtual', 'when', 'while', 'with',
]
keyopts = [
'!=','#','&','&&','\(','\)','\*','\+',',','-',
'-\.','->','\.','\.\.',':','::',':=',':>',';',';;','<',
'<-','=','>','>]','>}','\?','\?\?','\[','\[<','\[>','\[\|',
']','_','`','{','{<','\|','\|]','}','~'
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array']
tokens = {
'escape-sequence': [
(r'\\[\\\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class),
(r'\(\*(?![)])', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
            # a float needs a fractional part and/or an exponent
            # (note the escaped dot; the exponent is optional after a dot)
            (r'-?\d[\d_]*(\.[\d_]*([eE][+\-]?\d[\d_]*)?|[eE][+\-]?\d[\d_]*)',
             Number.Float),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Binary),
(r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'),
(r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
],
}
class ErlangLexer(RegexLexer):
"""
For the Erlang functional programming language.
Blame Jeremy Thurgood (http://jerith.za.net/).
*New in Pygments 0.9.*
"""
name = 'Erlang'
aliases = ['erlang']
filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
mimetypes = ['text/x-erlang']
keywords = [
'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
'let', 'of', 'query', 'receive', 'try', 'when',
]
builtins = [ # See erlang(3) man page
'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
'float', 'float_to_list', 'fun_info', 'fun_to_list',
'function_exported', 'garbage_collect', 'get', 'get_keys',
'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
'pid_to_list', 'port_close', 'port_command', 'port_connect',
'port_control', 'port_call', 'port_info', 'port_to_list',
'process_display', 'process_flag', 'process_info', 'purge_module',
'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
'spawn_opt', 'split_binary', 'start_timer', 'statistics',
'suspend_process', 'system_flag', 'system_info', 'system_monitor',
'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
]
operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
word_operators = [
'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
'div', 'not', 'or', 'orelse', 'rem', 'xor'
]
atom_re = r"(?:[a-z][a-zA-Z0-9_]*|'[^\n']*[^\\]')"
variable_re = r'(?:[A-Z_][a-zA-Z0-9_]*)'
escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))'
macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
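    # e.g. base_re covers radix-prefixed integers such as 2#1010 and 16#ffff,
    # matched in 'root' below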
tokens = {
'root': [
(r'\s+', Text),
(r'%.*\n', Comment),
('(' + '|'.join(keywords) + r')\b', Keyword),
('(' + '|'.join(builtins) + r')\b', Name.Builtin),
('(' + '|'.join(word_operators) + r')\b', Operator.Word),
(r'^-', Punctuation, 'directive'),
(operators, Operator),
(r'"', String, 'string'),
(r'<<', Name.Label),
(r'>>', Name.Label),
('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[+-]?'+base_re+r'#[0-9a-zA-Z]+', Number.Integer),
            # floats (with optional exponent) must be tried before plain
            # integers, or "3.14" would be split after the "3"
            (r'[+-]?\d+\.\d+([eE][+-]?\d+)?', Number.Float),
            (r'[+-]?\d+', Number.Integer),
(r'[]\[:_@\".{}()|;,]', Punctuation),
(variable_re, Name.Variable),
(atom_re, Name),
(r'\?'+macro_re, Name.Constant),
(r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
            (r'#'+atom_re+r'(?:\.'+atom_re+r')?', Name.Label),
],
'string': [
(escape_re, String.Escape),
(r'"', String, '#pop'),
(r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol),
(r'[^"\\~]+', String),
(r'~', String),
],
'directive': [
(r'(define)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
(r'(record)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
(atom_re, Name.Entity, '#pop'),
],
}
class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
*New in Pygments 1.1.*
"""
name = 'Erlang erl session'
aliases = ['erl']
filenames = ['*.erl-sh']
mimetypes = ['text/x-erl-shellsession']
_prompt_re = re.compile(r'\d+>(?=\s|\Z)')
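    # e.g. matches the "1>" prompt in a session line like "1> lists:seq(1, 3)."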
def get_tokens_unprocessed(self, text):
erlexer = ErlangLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('*'):
yield match.start(), Generic.Traceback, line
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
*New in Pygments 1.5.*
"""
name = 'Opa'
aliases = ['opa']
filenames = ['*.opa']
mimetypes = ['text/x-opa']
# most of these aren't strictly keywords
# but if you color only real keywords, you might just
# as well not color anything
keywords = [
'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
'else', 'end', 'external', 'forall', 'function', 'if', 'import',
'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
'type', 'val', 'with', 'xml_parser',
]
# matches both stuff and `stuff`
ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
op_re = r'[.=\-<>,@~%/+?*&^!]'
punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
# because they are also used for inserts
tokens = {
# copied from the caml lexer, should be adapted
'escape-sequence': [
(r'\\[\\\"\'ntr}]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
# factorizing these rules, because they are inserted many times
'comments': [
(r'/\*', Comment, 'nested-comment'),
(r'//.*?$', Comment),
],
'comments-and-spaces': [
include('comments'),
(r'\s+', Text),
],
'root': [
include('comments-and-spaces'),
# keywords
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
# directives
# we could parse the actual set of directives instead of anything
# starting with @, but this is troublesome
# because it needs to be adjusted all the time
# and assuming we parse only sources that compile, it is useless
(r'@'+ident_re+r'\b', Name.Builtin.Pseudo),
# number literals
            # dots escaped and exponents optional, so that e.g. ".5" and
            # "1.5" are both matched as floats
            (r'-?\.\d+([eE][+\-]?\d+)?', Number.Float),
            (r'-?\d+\.\d*([eE][+\-]?\d+)?', Number.Float),
(r'-?\d+[eE][+\-]?\d+', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Binary),
(r'\d+', Number.Integer),
# color literals
(r'#[\da-fA-F]{3,6}', Number.Integer),
# string literals
(r'"', String.Double, 'string'),
# char literal, should be checked because this is the regexp from
# the caml lexer
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
String.Char),
# this is meant to deal with embedded exprs in strings
# every time we find a '}' we pop a state so that if we were
# inside a string, we are back in the string state
# as a consequence, we must also push a state every time we find a
# '{' or else we will have errors when parsing {} for instance
(r'{', Operator, '#push'),
(r'}', Operator, '#pop'),
# html literals
            # this is much more strict than the actual parser,
            # since a<b would not be parsed as html
            # but then again, the actual parser is way too lax, and we
            # can't hope to be as tolerant
(r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
# db path
# matching the '[_]' in '/a[_]' because it is a part
# of the syntax of the db path definition
            # unfortunately, I don't know how to match the ']' in
# /a[1], so this is somewhat inconsistent
(r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
# putting the same color on <- as on db path, since
# it can be used only to mean Db.write
(r'<-(?!'+op_re+r')', Name.Variable),
# 'modules'
            # although modules are not distinguished by their names as in caml,
            # the standard library seems to follow the convention that only
            # module names are capitalized
(r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
# operators
            # = has a special role because it is the only way to
            # syntactically distinguish binding constructions
            # (unfortunately, this colors the equals sign in {x=2} too)
(r'=(?!'+op_re+r')', Keyword),
(r'(%s)+' % op_re, Operator),
(r'(%s)+' % punc_re, Operator),
# coercions
(r':', Operator, 'type'),
# type variables
# we need this rule because we don't parse specially type
# definitions so in "type t('a) = ...", "'a" is parsed by 'root'
("'"+ident_re, Keyword.Type),
# id literal, #something, or #{expr}
(r'#'+ident_re, String.Single),
(r'#(?={)', String.Single),
# identifiers
            # this avoids coloring the '2' in 'a2' as an integer
(ident_re, Text),
# default, not sure if that is needed or not
# (r'.', Text),
],
# it is quite painful to have to parse types to know where they end
# this is the general rule for a type
# a type is either:
# * -> ty
# * type-with-slash
# * type-with-slash -> ty
# * type-with-slash (, type-with-slash)+ -> ty
#
    # the code is pretty funky here, but it would roughly translate
    # to the following caml:
# let rec type stream =
# match stream with
# | [< "->"; stream >] -> type stream
# | [< ""; stream >] ->
# type_with_slash stream
# type_lhs_1 stream;
# and type_1 stream = ...
'type': [
include('comments-and-spaces'),
(r'->', Keyword.Type),
(r'', Keyword.Type, ('#pop', 'type-lhs-1', 'type-with-slash')),
],
# parses all the atomic or closed constructions in the syntax of type
# expressions: record types, tuple types, type constructors, basic type
# and type variables
'type-1': [
include('comments-and-spaces'),
(r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(r'~?{', Keyword.Type, ('#pop', 'type-record')),
(ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(ident_re, Keyword.Type, '#pop'),
("'"+ident_re, Keyword.Type),
# this case is not in the syntax but sometimes
# we think we are parsing types when in fact we are parsing
# some css, so we just pop the states until we get back into
# the root state
(r'', Keyword.Type, '#pop'),
],
# type-with-slash is either:
# * type-1
# * type-1 (/ type-1)+
'type-with-slash': [
include('comments-and-spaces'),
(r'', Keyword.Type, ('#pop', 'slash-type-1', 'type-1')),
],
'slash-type-1': [
include('comments-and-spaces'),
('/', Keyword.Type, ('#pop', 'type-1')),
# same remark as above
(r'', Keyword.Type, '#pop'),
],
# we go in this state after having parsed a type-with-slash
# while trying to parse a type
# and at this point we must determine if we are parsing an arrow
# type (in which case we must continue parsing) or not (in which
# case we stop)
'type-lhs-1': [
include('comments-and-spaces'),
(r'->', Keyword.Type, ('#pop', 'type')),
(r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
(r'', Keyword.Type, '#pop'),
],
'type-arrow': [
include('comments-and-spaces'),
            # the lookahead here allows us to parse
            # f(x : int, y : float -> truc) correctly
(r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
(r'->', Keyword.Type, ('#pop', 'type')),
# same remark as above
(r'', Keyword.Type, '#pop'),
],
# no need to do precise parsing for tuples and records
# because they are closed constructions, so we can simply
# find the closing delimiter
        # note that this would not work if the source contained
        # identifiers like `{)` (although it could be patched
        # to support them)
'type-tuple': [
include('comments-and-spaces'),
(r'[^\(\)/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\(', Keyword.Type, '#push'),
(r'\)', Keyword.Type, '#pop'),
],
'type-record': [
include('comments-and-spaces'),
(r'[^{}/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'{', Keyword.Type, '#push'),
(r'}', Keyword.Type, '#pop'),
],
# 'type-tuple': [
# include('comments-and-spaces'),
# (r'\)', Keyword.Type, '#pop'),
# (r'', Keyword.Type, ('#pop', 'type-tuple-1', 'type-1')),
# ],
# 'type-tuple-1': [
# include('comments-and-spaces'),
# (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
# (r',', Keyword.Type, 'type-1'),
# ],
# 'type-record':[
# include('comments-and-spaces'),
# (r'}', Keyword.Type, '#pop'),
# (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
# ],
# 'type-record-field-expr': [
#
# ],
'nested-comment': [
(r'[^/*]+', Comment),
(r'/\*', Comment, '#push'),
(r'\*/', Comment, '#pop'),
(r'[/*]', Comment),
],
        # the copy-pasting between 'string' and 'single-string'
        # is unfortunate; is there a way to avoid it?
'string': [
(r'[^\\"{]+', String.Double),
(r'"', String.Double, '#pop'),
(r'{', Operator, 'root'),
include('escape-sequence'),
],
'single-string': [
(r'[^\\\'{]+', String.Double),
(r'\'', String.Double, '#pop'),
(r'{', Operator, 'root'),
include('escape-sequence'),
],
# all the html stuff
# can't really reuse some existing html parser
# because we must be able to parse embedded expressions
# we are in this state after someone parsed the '<' that
# started the html literal
'html-open-tag': [
(r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
(r'>', String.Single, ('#pop', 'html-content')),
],
# we are in this state after someone parsed the '</' that
        # started the closing tag
'html-end-tag': [
# this is a star, because </> is allowed
(r'[\w\-:]*>', String.Single, '#pop'),
],
# we are in this state after having parsed '<ident(:ident)?'
# we thus parse a possibly empty list of attributes
'html-attr': [
(r'\s+', Text),
(r'[\w\-:]+=', String.Single, 'html-attr-value'),
(r'/>', String.Single, '#pop'),
(r'>', String.Single, ('#pop', 'html-content')),
],
'html-attr-value': [
(r"'", String.Single, ('#pop', 'single-string')),
(r'"', String.Single, ('#pop', 'string')),
(r'#'+ident_re, String.Single, '#pop'),
(r'#(?={)', String.Single, ('#pop', 'root')),
(r'[^"\'{`=<>]+', String.Single, '#pop'),
(r'{', Operator, ('#pop', 'root')), # this is a tail call!
],
# we should probably deal with '\' escapes here
'html-content': [
(r'<!--', Comment, 'html-comment'),
(r'</', String.Single, ('#pop', 'html-end-tag')),
(r'<', String.Single, 'html-open-tag'),
(r'{', Operator, 'root'),
(r'[^<{]+', String.Single),
],
'html-comment': [
(r'-->', Comment, '#pop'),
(r'[^\-]+|-', Comment),
],
}
class CoqLexer(RegexLexer):
"""
For the `Coq <http://coq.inria.fr/>`_ theorem prover.
*New in Pygments 1.5.*
"""
name = 'Coq'
aliases = ['coq']
filenames = ['*.v']
mimetypes = ['text/x-coq']
keywords1 = [
# Vernacular commands
'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable',
'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis',
'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope',
'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac',
'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit',
'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex',
'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure',
'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary',
'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save',
'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search',
'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside',
'outside',
]
keywords2 = [
# Gallina
'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct',
'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else',
'for', 'of', 'nosimpl', 'with', 'as',
]
keywords3 = [
# Sorts
'Type', 'Prop',
]
keywords4 = [
# Tactics
'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro',
'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct',
'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite',
'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold',
'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog',
'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial',
'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto',
'split', 'left', 'right', 'autorewrite',
]
keywords5 = [
# Terminators
'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega',
'assumption', 'solve', 'contradiction', 'discriminate',
]
keywords6 = [
# Control
'do', 'last', 'first', 'try', 'idtac', 'repeat',
]
# 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
# 'downto', 'else', 'end', 'exception', 'external', 'false',
# 'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
# 'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
# 'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
# 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
# 'type', 'val', 'virtual', 'when', 'while', 'with'
keyopts = [
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
'<-', '=', '>', '>]', '>}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', '{', '{<', r'\|', r'\|]', '}', '~', '=>',
r'/\\', r'\\/',
u'Π', u'λ',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list',
'array']
tokens = {
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\(\*', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords1), Keyword.Namespace),
(r'\b(%s)\b' % '|'.join(keywords2), Keyword),
(r'\b(%s)\b' % '|'.join(keywords3), Keyword.Type),
(r'\b(%s)\b' % '|'.join(keywords4), Keyword),
(r'\b(%s)\b' % '|'.join(keywords5), Keyword.Pseudo),
(r'\b(%s)\b' % '|'.join(keywords6), Keyword.Reserved),
(r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Binary),
            (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^"]+', String.Double),
(r'""', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'),
(r'[a-z][a-z0-9_\']*', Name, '#pop'),
(r'', Text, '#pop')
],
}
def analyse_text(text):
if text.startswith('(*'):
return True
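    # Illustrative use of the heuristic above (a hypothetical snippet, not
    # part of the original file):
    #   from pygments.lexers import guess_lexer
    #   guess_lexer('(* a comment *) Lemma l : True.')  # should pick CoqLexer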
class NewLispLexer(RegexLexer):
"""
    For `newLISP <http://www.newlisp.org/>`_ source code (version 10.3.0).
*New in Pygments 1.5.*
"""
name = 'NewLisp'
aliases = ['newlisp']
filenames = ['*.lsp', '*.nl']
mimetypes = ['text/x-newlisp', 'application/x-newlisp']
flags = re.IGNORECASE | re.MULTILINE | re.UNICODE
# list of built-in functions for newLISP version 10.3
builtins = [
'^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+', '++',
'<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10',
'$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7',
'$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs',
'acos', 'acosh', 'add', 'address', 'amb', 'and', 'and', 'append-file',
'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin',
'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec',
'base64-enc', 'bayes-query', 'bayes-train', 'begin', 'begin', 'begin',
'beta', 'betai', 'bind', 'binomial', 'bits', 'callback', 'case', 'case',
'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean',
'close', 'command-event', 'cond', 'cond', 'cond', 'cons', 'constant',
'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count',
'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry',
'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec',
'def-new', 'default', 'define-macro', 'define-macro', 'define',
'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device',
'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while',
'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup',
'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event',
'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand',
'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter',
'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt',
'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln',
'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string',
'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc',
'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert',
'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error',
'last', 'legal?', 'length', 'let', 'let', 'let', 'letex', 'letn',
'letn', 'letn', 'list?', 'list', 'load', 'local', 'log', 'lookup',
'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat',
'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply',
'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error',
'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local',
'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping',
'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select',
'net-send-to', 'net-send-udp', 'net-send', 'net-service',
'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper',
'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack',
'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 'pop',
'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print',
'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event',
'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand',
'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file',
'read-key', 'read-line', 'read-utf8', 'read', 'reader-event',
'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex',
'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse',
'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self',
'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all',
'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent',
'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt',
'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?',
'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term',
'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case',
'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?',
'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until',
'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while',
'write', 'write-char', 'write-file', 'write-line', 'write',
'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?',
]
# valid names
valid_name = r'([a-zA-Z0-9!$%&*+.,/<=>?@^_~|-])+|(\[.*?\])+'
tokens = {
'root': [
# shebang
(r'#!(.*?)$', Comment.Preproc),
# comments starting with semicolon
(r';.*$', Comment.Single),
# comments starting with #
(r'#.*$', Comment.Single),
# whitespace
(r'\s+', Text),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
# braces
(r"{", String, "bracestring"),
# [text] ... [/text] delimited strings
(r'\[text\]*', String, "tagstring"),
# 'special' operators...
(r"('|:)", Operator),
# highlight the builtins
('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins),
Keyword),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Variable),
# the remaining variables
(valid_name, String.Symbol),
# parentheses
(r'(\(|\))', Punctuation),
],
# braced strings...
'bracestring': [
("{", String, "#push"),
("}", String, "#pop"),
("[^{}]+", String),
],
# tagged [text]...[/text] delimited strings...
'tagstring': [
(r'(?s)(.*?)(\[/text\])', String, '#pop'),
],
}
class ElixirLexer(RegexLexer):
"""
For the `Elixir language <http://elixir-lang.org>`_.
*New in Pygments 1.5.*
"""
name = 'Elixir'
aliases = ['elixir', 'ex', 'exs']
filenames = ['*.ex', '*.exs']
mimetypes = ['text/x-elixir']
def gen_elixir_sigil_rules():
states = {}
states['strings'] = [
(r'(%[A-Ba-z])?"""(?:.|\n)*?"""', String.Doc),
(r"'''(?:.|\n)*?'''", String.Doc),
(r'"', String.Double, 'dqs'),
(r"'.*'", String.Single),
(r'(?<!\w)\?(\\(x\d{1,2}|\h{1,2}(?!\h)\b|0[0-7]{0,2}(?![0-7])\b|'
r'[^x0MC])|(\\[MC]-)+\w|[^\s\\])', String.Other)
]
for lbrace, rbrace, name, in ('\\{', '\\}', 'cb'), \
('\\[', '\\]', 'sb'), \
('\\(', '\\)', 'pa'), \
('\\<', '\\>', 'lt'):
states['strings'] += [
(r'%[a-z]' + lbrace, String.Double, name + 'intp'),
(r'%[A-Z]' + lbrace, String.Double, name + 'no-intp')
]
states[name +'intp'] = [
(r'' + rbrace + '[a-z]*', String.Double, "#pop"),
include('enddoublestr')
]
states[name +'no-intp'] = [
(r'.*' + rbrace + '[a-z]*', String.Double , "#pop")
]
return states
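    # The factory above generates eight states -- 'cbintp'/'cbno-intp',
    # 'sbintp'/'sbno-intp', 'paintp'/'pano-intp' and 'ltintp'/'ltno-intp' --
    # so that, e.g., %b{with #{interpolation}} is lexed via 'cbintp' while
    # %B{raw} is lexed via 'cbno-intp' (the sigil letters are illustrative;
    # any lowercase/uppercase letter triggers the same states).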
tokens = {
'root': [
(r'\s+', Text),
(r'#.*$', Comment.Single),
(r'\b(case|cond|end|bc|lc|if|unless|try|loop|receive|fn|defmodule|'
r'defp?|defprotocol|defimpl|defrecord|defmacrop?|defdelegate|'
r'defexception|exit|raise|throw|unless|after|rescue|catch|else)\b(?![?!])|'
r'(?<!\.)\b(do|\-\>)\b\s*', Keyword),
(r'\b(import|require|use|recur|quote|unquote|super|refer)\b(?![?!])',
Keyword.Namespace),
(r'(?<!\.)\b(and|not|or|when|xor|in)\b', Operator.Word),
(r'%=|\*=|\*\*=|\+=|\-=|\^=|\|\|=|'
r'<=>|<(?!<|=)|>(?!<|=|>)|<=|>=|===|==|=~|!=|!~|(?=[ \t])\?|'
r'(?<=[ \t])!+|&&|\|\||\^|\*|\+|\-|/|'
r'\||\+\+|\-\-|\*\*|\/\/|\<\-|\<\>|<<|>>|=|\.', Operator),
(r'(?<!:)(:)([a-zA-Z_]\w*([?!]|=(?![>=]))?|\<\>|===?|>=?|<=?|'
r'<=>|&&?|%\(\)|%\[\]|%\{\}|\+\+?|\-\-?|\|\|?|\!|//|[%&`/\|]|'
r'\*\*?|=?~|<\-)|([a-zA-Z_]\w*([?!])?)(:)(?!:)', String.Symbol),
(r':"', String.Symbol, 'interpoling_symbol'),
(r'\b(nil|true|false)\b(?![?!])|\b[A-Z]\w*\b', Name.Constant),
(r'\b(__(FILE|LINE|MODULE|MAIN|FUNCTION)__)\b(?![?!])', Name.Builtin.Pseudo),
(r'[a-zA-Z_!][\w_]*[!\?]?', Name),
(r'[(){};,/\|:\\\[\]]', Punctuation),
(r'@[a-zA-Z_]\w*|&\d', Name.Variable),
(r'\b(0[xX][0-9A-Fa-f]+|\d(_?\d)*(\.(?![^\d\s])'
r'(_?\d)*)?([eE][-+]?\d(_?\d)*)?|0[bB][01]+)\b', Number),
(r'%r\/.*\/', String.Regex),
include('strings'),
],
'dqs': [
(r'"', String.Double, "#pop"),
include('enddoublestr')
],
'interpoling': [
(r'#{', String.Interpol, 'interpoling_string'),
],
'interpoling_string' : [
(r'}', String.Interpol, "#pop"),
include('root')
],
'interpoling_symbol': [
(r'"', String.Symbol, "#pop"),
include('interpoling'),
(r'[^#"]+', String.Symbol),
],
'enddoublestr' : [
include('interpoling'),
(r'[^#"]+', String.Double),
]
}
tokens.update(gen_elixir_sigil_rules())
class ElixirConsoleLexer(Lexer):
"""
For Elixir interactive console (iex) output like:
.. sourcecode:: iex
iex> [head | tail] = [1,2,3]
[1,2,3]
iex> head
1
iex> tail
[2,3]
iex> [head | tail]
[1,2,3]
iex> length [head | tail]
3
*New in Pygments 1.5.*
"""
name = 'Elixir iex session'
aliases = ['iex']
mimetypes = ['text/x-elixir-shellsession']
    _prompt_re = re.compile(r'(iex|\.{3})> ')
def get_tokens_unprocessed(self, text):
exlexer = ElixirLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith(u'** '):
insertions.append((len(curcode),
[(0, Generic.Error, line[:-1])]))
curcode += line[-1:]
else:
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
exlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
exlexer.get_tokens_unprocessed(curcode)):
yield item
class KokaLexer(RegexLexer):
"""
Lexer for the `Koka <http://koka.codeplex.com>`_
language.
*New in Pygments 1.6.*
"""
name = 'Koka'
aliases = ['koka']
filenames = ['*.kk', '*.kki']
mimetypes = ['text/x-koka']
keywords = [
'infix', 'infixr', 'infixl',
'type', 'cotype', 'rectype', 'alias',
'struct', 'con',
'fun', 'function', 'val', 'var',
'external',
'if', 'then', 'else', 'elif', 'return', 'match',
        'private', 'public',
'module', 'import', 'as',
'include', 'inline',
'rec',
'try', 'yield', 'enum',
'interface', 'instance',
]
# keywords that are followed by a type
typeStartKeywords = [
'type', 'cotype', 'rectype', 'alias', 'struct', 'enum',
]
# keywords valid in a type
typekeywords = [
'forall', 'exists', 'some', 'with',
]
# builtin names and special names
builtin = [
'for', 'while', 'repeat',
'foreach', 'foreach-indexed',
'error', 'catch', 'finally',
'cs', 'js', 'file', 'ref', 'assigned',
]
# symbols that can be in an operator
symbols = '[\$%&\*\+@!/\\\^~=\.:\-\?\|<>]+'
# symbol boundary: an operator keyword should not be followed by any of these
sboundary = '(?!'+symbols+')'
# name boundary: a keyword should not be followed by any of these
boundary = '(?![\w/])'
# koka token abstractions
tokenType = Name.Attribute
tokenTypeDef = Name.Class
tokenConstructor = Generic.Emph
# main lexer
tokens = {
'root': [
include('whitespace'),
# go into type mode
(r'::?' + sboundary, tokenType, 'type'),
(r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'alias-type'),
(r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'struct-type'),
((r'(%s)' % '|'.join(typeStartKeywords)) +
r'(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'type'),
# special sequences of tokens (we use ?: for non-capturing group as
# required by 'bygroups')
(r'(module)(\s+)(interface\s+)?((?:[a-z]\w*/)*[a-z]\w*)',
bygroups(Keyword, Text, Keyword, Name.Namespace)),
(r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)'
r'(?:(\s*)(=)(\s*)((?:qualified\s*)?)'
r'((?:[a-z]\w*/)*[a-z]\w*))?',
bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text,
Keyword, Name.Namespace)),
(r'(^(?:(?:public|private)\s*)?(?:function|fun|val))'
r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))',
bygroups(Keyword, Text, Name.Function)),
(r'(^(?:(?:public|private)\s*)?external)(\s+)(inline\s+)?'
r'([a-z]\w*|\((?:' + symbols + r'|/)\))',
bygroups(Keyword, Text, Keyword, Name.Function)),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),
(r'(%s)' % '|'.join(keywords) + boundary, Keyword),
(r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo),
(r'::?|:=|\->|[=\.]' + sboundary, Keyword),
# names
(r'((?:[a-z]\w*/)*)([A-Z]\w*)',
bygroups(Name.Namespace, tokenConstructor)),
(r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)),
(r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))',
bygroups(Name.Namespace, Name)),
(r'_\w*', Name.Variable),
# literal string
(r'@"', String.Double, 'litstring'),
# operators
(symbols + "|/(?![\*/])", Operator),
(r'`', Operator),
(r'[\{\}\(\)\[\];,]', Punctuation),
# literals. No check for literal characters with len > 1
(r'[0-9]+\.[0-9]+([eE][\-\+]?[0-9]+)?', Number.Float),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r"'", String.Char, 'char'),
(r'"', String.Double, 'string'),
],
# type started by alias
'alias-type': [
(r'=',Keyword),
include('type')
],
# type started by struct
'struct-type': [
(r'(?=\((?!,*\)))',Punctuation, '#pop'),
include('type')
],
# type started by colon
'type': [
(r'[\(\[<]', tokenType, 'type-nested'),
include('type-content')
],
# type nested in brackets: can contain parameters, comma etc.
'type-nested': [
(r'[\)\]>]', tokenType, '#pop'),
(r'[\(\[<]', tokenType, 'type-nested'),
(r',', tokenType),
(r'([a-z]\w*)(\s*)(:)(?!:)',
bygroups(Name, Text, tokenType)), # parameter name
include('type-content')
],
# shared contents of a type
'type-content': [
include('whitespace'),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword),
(r'(?=((%s)' % '|'.join(keywords) + boundary + '))',
Keyword, '#pop'), # need to match because names overlap...
# kinds
(r'[EPHVX]' + boundary, tokenType),
# type names
(r'[a-z][0-9]*(?![\w/])', tokenType ),
(r'_\w*', tokenType.Variable), # Generic.Emph
(r'((?:[a-z]\w*/)*)([A-Z]\w*)',
bygroups(Name.Namespace, tokenType)),
(r'((?:[a-z]\w*/)*)([a-z]\w+)',
bygroups(Name.Namespace, tokenType)),
# type keyword operators
(r'::|\->|[\.:|]', tokenType),
#catchall
(r'', Text, '#pop')
],
# comments and literals
'whitespace': [
(r'\n\s*#.*$', Comment.Preproc),
(r'\s+', Text),
(r'/\*', Comment.Multiline, 'comment'),
(r'//.*$', Comment.Single)
],
'comment': [
(r'[^/\*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[\*/]', Comment.Multiline),
],
'litstring': [
(r'[^"]+', String.Double),
(r'""', String.Escape),
(r'"', String.Double, '#pop'),
],
'string': [
(r'[^\\"\n]+', String.Double),
include('escape-sequence'),
(r'["\n]', String.Double, '#pop'),
],
'char': [
(r'[^\\\'\n]+', String.Char),
include('escape-sequence'),
(r'[\'\n]', String.Char, '#pop'),
],
'escape-sequence': [
(r'\\[nrt\\\"\']', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
# Yes, \U literals are 6 hex digits.
(r'\\U[0-9a-fA-F]{6}', String.Escape)
]
}
| mit |
kaustubhhiware/coala-bears | bears/ruby/RubySmellBear.py | 21 | 8585 | import json
from coalib.bearlib import deprecate_settings
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.GemRequirement import GemRequirement
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from coala_utils.param_conversion import negate
@linter(executable='reek', use_stdin=True)
class RubySmellBear:
"""
Detect code smells in Ruby source code.
For more information about the detected smells, see
<https://github.com/troessner/reek/blob/master/docs/Code-Smells.md>.
"""
LANGUAGES = {'Ruby'}
REQUIREMENTS = {GemRequirement('reek')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Smell'}
@staticmethod
def create_arguments(filename, file, config_file):
return '--format', 'json', '-c', config_file
def process_output(self, output, filename, file):
output = json.loads(output) if output else ()
for issue in output:
sourceranges = []
for line in issue['lines']:
sourceranges.append(SourceRange.from_values(
file=filename, start_line=line))
if 'name' in issue:
message = "'{}' (in '{}') {}.".format(
issue['name'], issue['context'], issue['message'])
else:
message = "'{}' {}".format(issue['context'], issue['message'])
yield Result(
origin='{} ({})'.format(self.__class__.__name__,
issue['smell_type']),
message=message,
affected_code=sourceranges,
additional_info='More information is available at {}'
'.'.format(issue['wiki_link']))
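    # Shape of a single reek JSON issue, inferred from the keys accessed
    # above (the values shown here are illustrative only):
    #   {"smell_type": "TooManyStatements", "context": "Foo#bar",
    #    "name": "bar", "message": "has approx 6 statements",
    #    "lines": [3, 7], "wiki_link": "https://github.com/troessner/..."}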
@deprecate_settings(allow_duplicate_method=(
'duplicate_method_call', negate),
allow_data_clump=('data_clump', negate),
allow_control_parameters=('control_parameter', negate),
allow_class_variables=('class_variable', negate),
allow_boolean_parameter_in_functions=(
'boolean_parameter', negate),
allow_setter_in_classes=('attribute', negate),
allow_unused_private_methods=(
'unused_private_method', negate),
allow_unused_variables=('unused_params', negate))
def generate_config(self,
allow_setter_in_classes: bool=False,
allow_boolean_parameter_in_functions: bool=False,
allow_class_variables: bool=False,
allow_control_parameters: bool=False,
allow_data_clump: bool=False,
allow_duplicate_method: bool=False,
feature_envy: bool=True,
missing_module_description: bool=True,
long_param_list: bool=True,
long_yield_list: bool=True,
module_initialize: bool=True,
nested_iterators: bool=True,
nil_check: bool=True,
prima_donna_method: bool=True,
repeated_conditional: bool=True,
too_many_instance_variables: bool=True,
too_many_methods: bool=True,
too_long_method: bool=True,
bad_method_name: bool=True,
bad_module_name: bool=True,
bad_param_name: bool=True,
bad_var_name: bool=True,
allow_unused_variables: bool=False,
allow_unused_private_methods: bool=True,
utility_function: bool=True):
"""
:param allow_setter_in_classes:
Allows setter in classes.
:param allow_boolean_parameter_in_functions:
Allows boolean parameter in functions (control coupling).
:param allow_class_variables:
Allows class variables.
:param allow_control_parameters:
Allows parameters that control function behaviour (control
coupling).
:param allow_data_clump:
Does not warn when the same two or three items frequently appear
together in function/class parameter list.
:param allow_duplicate_method:
Allows having two fragments of code that look nearly identical, or
two fragments of code that have nearly identical effects at some
conceptual level.
:param feature_envy:
Occurs when a code fragment references another object more often
than it references itself, or when several clients do the same
series of manipulations on a particular type of object.
:param missing_module_description:
Warns if a module description is missing.
:param long_param_list:
Warns about too many parameters of functions.
:param long_yield_list:
Warns when a method yields a lot of arguments to the block it gets
passed.
:param module_initialize:
Warns about ``#initialize`` methods in modules.
:param nested_iterators:
Warns when a block contains another block.
:param nil_check:
Warns about nil checks.
:param prima_donna_method:
Warns about methods whose names end with an exclamation mark.
:param repeated_conditional:
Warns about repeated conditionals.
:param too_many_instance_variables:
Warns for too many instance variables.
:param too_many_methods:
Warns if a class has too many methods.
:param too_long_method:
Warns about huge methods.
:param bad_method_name:
Warns about method names which are not communicating the purpose
of the method well.
:param bad_module_name:
Warns about module names which are not communicating the purpose
of the module well.
:param bad_param_name:
Warns about parameter names which are not communicating the purpose
of the parameter well.
:param bad_var_name:
Warns about variable names which are not communicating the purpose
of the variable well.
:param allow_unused_variables:
Allows unused parameters though they are dead code.
        :param allow_unused_private_methods:
            Allows unused private methods even though they are dead code.
        :param utility_function:
            Warns about any instance method that has no dependency on the
            state of the instance.
"""
config = {
'Attribute': not allow_setter_in_classes,
'BooleanParameter': not allow_boolean_parameter_in_functions,
'ClassVariable': not allow_class_variables,
'ControlParameter': not allow_control_parameters,
'DataClump': not allow_data_clump,
'DuplicateMethodCall': not allow_duplicate_method,
'FeatureEnvy': feature_envy,
'IrresponsibleModule': missing_module_description,
'LongParameterList': long_param_list,
'LongYieldList': long_yield_list,
'ModuleInitialize': module_initialize,
'NestedIterators': nested_iterators,
'NilCheck': nil_check,
'PrimaDonnaMethod': prima_donna_method,
'RepeatedConditional': repeated_conditional,
'TooManyInstanceVariables': too_many_instance_variables,
'TooManyMethods': too_many_methods,
'TooManyStatements': too_long_method,
'UncommunicativeMethodName': bad_method_name,
'UncommunicativeModuleName': bad_module_name,
'UncommunicativeParameterName': bad_param_name,
'UncommunicativeVariableName': bad_var_name,
'UnusedParameters': not allow_unused_variables,
'UnusedPrivateMethod': not allow_unused_private_methods,
'UtilityFunction': utility_function}
return ('---\n' +
'\n'.join('{}:\n enabled: {}'.format(key, str(value).lower())
for key, value in config.items()))
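    # With all defaults, the returned config (a sketch; key order follows the
    # dict above, which Python 3.7+ dicts preserve) starts like:
    #   ---
    #   Attribute:
    #     enabled: true
    #   BooleanParameter:
    #     enabled: true
    #   ...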
| agpl-3.0 |
selahssea/ggrc-core | src/ggrc_basic_permissions/migrations/versions/20141217235626_51e046bb002_ensure_all_audit_leads_have_program_.py | 7 | 1449 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Ensure all Audit Leads have Program Editor role or better
Revision ID: 51e046bb002
Revises: 581a9621fac1
Create Date: 2014-12-17 23:56:26.323023
"""
# revision identifiers, used by Alembic.
revision = '51e046bb002'
down_revision = '581a9621fac1'
from alembic import op
import sqlalchemy as sa
def upgrade():
#1: remove programreader roles for assignees (they need to be program editor)
op.execute("""
DELETE ur
FROM user_roles ur, contexts c, audits a, roles r, programs p
WHERE a.program_id=p.id AND p.context_id=c.id AND ur.context_id=c.id AND ur.role_id=r.id
AND a.contact_id=ur.person_id
AND r.name = "ProgramReader"
""")
#2: give assignees with no roles in the program ProgramEditor
op.execute("""
INSERT INTO user_roles (role_id, created_at, updated_at, context_id, person_id)
SELECT distinct (SELECT id from roles where name='ProgramEditor') as role_id,
now() as created_at,
now() as updated_at,
p.context_id,
a.contact_id
FROM audits a INNER JOIN programs p on a.program_id=p.id
INNER JOIN contexts c on p.context_id=c.id
LEFT OUTER JOIN user_roles ur on c.id=ur.context_id and a.contact_id=ur.person_id
WHERE ur.person_id IS NULL
""")
def downgrade():
pass
| apache-2.0 |
tomlof/scikit-learn | doc/sphinxext/sphinx_gallery/docs_resolv.py | 23 | 16468 | # -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
###############################################################################
# Documentation link resolver objects
from __future__ import print_function
import gzip
import os
import posixpath
import re
import shelve
import sys
# Try Python 2 first, otherwise load from Python 3
try:
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
from io import StringIO
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
return data
def get_data(url, gallery_dir):
"""Persistent dictionary usage to retrieve the search indexes"""
# shelve keys need to be str in python 2
if sys.version_info[0] == 2 and isinstance(url, unicode):
url = url.encode('utf-8')
cached_file = os.path.join(gallery_dir, 'searchindex')
search_index = shelve.open(cached_file)
if url in search_index:
data = search_index[url]
else:
data = _get_data(url)
search_index[url] = data
search_index.close()
return data
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
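# For example (illustrative):
#   _select_block('{"a":{"b":1}}', '{', '}')  # -> '"a":{"b":1}'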
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
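# For example (illustrative):
#   _parse_dict_recursive('a:[1,2],b:{c:[3]}')
#   # -> {'a': [1, 2], 'b': {'c': [3]}}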
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
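# Typical usage (a sketch; the URL is hypothetical):
#   sindex = get_data('http://example.org/searchindex.js', gallery_dir)
#   filenames, objects = parse_sphinx_searchindex(sindex)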
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, gallery_dir, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.gallery_dir = gallery_dir
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url, gallery_dir)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx]
# In 1.5+ Sphinx seems to have changed from .rst.html to only
# .html extension in converted files. But URLs could be
# built with < 1.5 or >= 1.5 regardless of what we're currently
# building with, so let's just check both :(
fnames = [fname + '.html', os.path.splitext(fname)[0] + '.html']
for fname in fnames:
try:
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link, self.gallery_dir)
self._page_cache[link] = html
except (HTTPError, URLError, IOError):
pass
else:
break
else:
raise
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobi['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
def _embed_code_links(app, gallery_conf, gallery_dir):
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
for this_module, url in gallery_conf['reference_url'].items():
try:
if url is None:
doc_resolvers[this_module] = SphinxDocLinkResolver(
app.builder.outdir,
gallery_dir,
relative=True)
else:
doc_resolvers[this_module] = SphinxDocLinkResolver(url,
gallery_dir)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"Internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
html_gallery_dir = os.path.abspath(os.path.join(app.builder.outdir,
gallery_dir))
# patterns for replacement
link_pattern = ('<a href="%s" class="sphx-glr-code-links" '
'tooltip="Link to documentation for %s">%s</a>')
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_gallery_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_gallery_dir, dirpath, fname)
subpath = dirpath[len(html_gallery_dir) + 1:]
pickle_fname = os.path.join(gallery_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
if isinstance(e, HTTPError):
extra = e.code
else:
extra = e.reason
print("\t\tError resolving %s.%s: %r (%s)"
% (cobj['module'], cobj['name'], e, extra))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
full_function_name = '%s.%s' % (
cobj['module'], cobj['name'])
str_repl[name_html] = link_pattern % (
link, full_function_name, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
# No need to waste time embedding hyperlinks when not running the examples
# XXX: also at the time of writing this fixes make html-noplot
# for some reason I don't fully understand
if not app.builder.config.plot_gallery:
return
# XXX: Whitelist of builders for which it makes sense to embed
# hyperlinks inside the example html. Note that the link embedding
# require searchindex.js to exist for the links to the local doc
# and there does not seem to be a good way of knowing which
# builders creates a searchindex.js.
if app.builder.name not in ['html', 'readthedocs']:
return
print('Embedding documentation hyperlinks in examples..')
gallery_conf = app.config.sphinx_gallery_conf
gallery_dirs = gallery_conf['gallery_dirs']
if not isinstance(gallery_dirs, list):
gallery_dirs = [gallery_dirs]
for gallery_dir in gallery_dirs:
_embed_code_links(app, gallery_conf, gallery_dir)
| bsd-3-clause |
Dees-Troy/android_kernel_asus_tf700t | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
jchbh-duplicate/shadowsocks | shadowsocks/tcprelay.py | 922 | 28870 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
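# For example, following the stage notes above, a typical ssserver connection
# moves STAGE_INIT -> STAGE_DNS (set while the address header is parsed)
# -> STAGE_CONNECTING -> STAGE_STREAM, and finally STAGE_DESTROYED when
# either side closes.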
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
# TCP Relay works as either sslocal or ssserver
# if is_local, this is sslocal
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
self._server)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _update_activity(self, data_len=0):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
shell.print_exception(e)
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (common.to_str(remote_addr), remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT,
self._server)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
self._write_to_sock(b'\x05\00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
# if any of the promises are broken, it indicates a bug has been
        # introduced! most likely memory leaks, etc
if self._stage == STAGE_DESTROYED:
# this couldn't happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
# we trim the timeouts once a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
self._stat_callback = stat_callback
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
self._eventloop.add_periodic(self.handle_periodic)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler, data_len):
if data_len and self._stat_callback:
self._stat_callback(self._listen_port, data_len)
# set handler to active
now = int(time.time())
if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
            # skip the update to lower the timeout-bookkeeping frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
        # tornado's timeout memory management is more flexible than we need:
        # a queue kept sorted by last_activity is enough and beats heapq,
        # since it allows O(1) insertion/removal, so we roll our own
        # (see the standalone sketch at the end of this module)
if self._timeouts:
logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                # compact the queue once the swept prefix covers more
                # than half of it
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
def handle_event(self, sock, fd, event):
# handle events and dispatch to handlers
if sock:
logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed TCP port %d', self._listen_port)
if not self._fd_to_handlers:
logging.info('stopping')
self._eventloop.stop()
self._sweep_timeout()
def close(self, next_tick=False):
logging.debug('TCP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for handler in list(self._fd_to_handlers.values()):
handler.destroy()
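# --- Illustrative sketch (not part of the original module) ---
# update_activity() and _sweep_timeout() above amortize timeout
# bookkeeping: removing a handler just overwrites its old slot with None
# (O(1)) and re-inserting appends to the tail, so the queue stays ordered
# by last-activity time. A minimal standalone version of the idea, with
# hypothetical names, for illustration only:
def _lazy_timeout_queue_sketch():
    timeouts = []  # append-only queue, ordered by last-activity time
    index_of = {}  # id(handler) -> index into `timeouts`

    def touch(handler):
        old = index_of.get(id(handler), -1)
        if old >= 0:
            timeouts[old] = None  # O(1) lazy delete of the stale slot
        index_of[id(handler)] = len(timeouts)
        timeouts.append(handler)  # O(1) insert at the tail

    return touch, timeouts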
| apache-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/user_list_service/transports/grpc.py | 1 | 11974 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import user_list
from google.ads.googleads.v8.services.types import user_list_service
from .base import UserListServiceTransport, DEFAULT_CLIENT_INFO
class UserListServiceGrpcTransport(UserListServiceTransport):
"""gRPC backend transport for UserListService.
Service to manage user lists.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel, since none was provided.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel, since none was provided.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_user_list(self) -> Callable[
[user_list_service.GetUserListRequest],
user_list.UserList]:
r"""Return a callable for the get user list method over gRPC.
Returns the requested user list.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetUserListRequest],
~.UserList]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_user_list' not in self._stubs:
self._stubs['get_user_list'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v8.services.UserListService/GetUserList',
request_serializer=user_list_service.GetUserListRequest.serialize,
response_deserializer=user_list.UserList.deserialize,
)
return self._stubs['get_user_list']
@property
def mutate_user_lists(self) -> Callable[
[user_list_service.MutateUserListsRequest],
user_list_service.MutateUserListsResponse]:
r"""Return a callable for the mutate user lists method over gRPC.
Creates or updates user lists. Operation statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FieldError <>`__
`FieldMaskError <>`__ `HeaderError <>`__ `InternalError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotAllowlistedError <>`__ `NotEmptyError <>`__
`OperationAccessDeniedError <>`__ `QuotaError <>`__
`RangeError <>`__ `RequestError <>`__ `StringFormatError <>`__
`StringLengthError <>`__ `UserListError <>`__
Returns:
Callable[[~.MutateUserListsRequest],
~.MutateUserListsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'mutate_user_lists' not in self._stubs:
self._stubs['mutate_user_lists'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v8.services.UserListService/MutateUserLists',
request_serializer=user_list_service.MutateUserListsRequest.serialize,
response_deserializer=user_list_service.MutateUserListsResponse.deserialize,
)
return self._stubs['mutate_user_lists']
__all__ = (
'UserListServiceGrpcTransport',
)
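# --- Illustrative usage sketch (not part of the generated module) ---
# Each RPC property above builds its unary-unary stub lazily and caches it
# in self._stubs, so repeated attribute access reuses one channel-bound
# callable. A hypothetical, untested driver (credentials resolved from the
# environment; request fields omitted on purpose, since their names are
# not shown in this file):
#
#     transport = UserListServiceGrpcTransport()
#     rpc = transport.get_user_list          # stub created on first access
#     response = rpc(user_list_service.GetUserListRequest())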
| apache-2.0 |
rwgdrummer/maskgen | other_plugins/CocoMaskSelector/__init__.py | 1 | 1743 | import maskgen
from maskgen_coco import createMaskImageWithParams
import sys
import numpy as np
from maskgen.image_wrap import ImageWrapper
"""
Selects a Mask from Coco presegmented images
"""
def transform(img, source, target, **kwargs):
areaConstraints = (int(kwargs['area.lower.bound']) if 'area.lower.bound' in kwargs else 0,
int(kwargs['area.upper.bound']) if 'area.upper.bound' in kwargs else sys.maxint)
    annotation, mask = createMaskImageWithParams(np.asarray(img), source, kwargs, areaConstraint=areaConstraints)
ImageWrapper(mask).save(target)
    return {'subject': annotation}, None
def operation():
return {'name': 'SelectRegion',
'category': 'Select',
'software': 'maskgen',
'version': maskgen.__version__[0:6],
'arguments': {
'coco': {
"type": "str",
"description": "Coco Object."
},
'coco.index': {
"type": "str",
"description": "Coco file->id Dictionary"
},
'area.lower.bound': {
"type": "int[0:100000000000]",
"description": "lower bound on area of segment in pixels"
},
'area.upper.bound': {
"type": "int[0:100000000000]",
"description": "upper bound on area of segment in pixels"
}
},
'description': 'Create a limited selection in a donor image. The provided inputmask is placed as the alpha channel of the result image',
'transitions': [
'image.image'
]
}
def suffix():
return '.png'
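# --- Illustrative sketch (not part of the original plugin) ---
# The plugin is driven through transform(img, source, target, **kwargs).
# Because the argument names declared in operation() contain dots, they
# have to be passed via dict unpacking. Hypothetical invocation (my_coco
# and my_index are placeholders for real Coco inputs):
#
#     from PIL import Image
#     img = Image.open('donor.png')
#     result, error = transform(img, 'donor.png', 'mask.png',
#                               **{'coco': my_coco,
#                                  'coco.index': my_index,
#                                  'area.lower.bound': '100'})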
| bsd-3-clause |
40223136/2015cd_0505 | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/util.py | 696 | 9917 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading  # we want threading to install its cleanup
                  # function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children,
current_process=current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
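#
# Illustrative sketch (not part of the original module): Finalize ties a
# cleanup callback to an object's lifetime through a weakref, just as
# get_temp_dir() does above with shutil.rmtree. Hypothetical example:
#

def _finalize_example():
    class Holder(object):
        pass
    holder = Holder()
    # runs when `holder` is collected, or at exit (exitpriority 10)
    Finalize(holder, print, args=('holder is gone',), exitpriority=10)
    return holder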
| agpl-3.0 |
osmr/utct | MXNet/trainer.py | 1 | 5876 | import logging
from .train_controller import TrainController, TrainControllerStopException
from utct.common.trainer_template import TrainerTemplate
import mxnet as mx
class Trainer(TrainerTemplate):
"""
Class, which provides training process under MXNet framework.
Parameters:
----------
model : object
instance of Model class with graph of CNN
optimizer : object
instance of Optimizer class with CNN optimizer
data_source : object
instance of DataSource class with training/validation iterators
saver : object
instance of Saver class with information about stored files
ctx : object
instance of MXNet context
tc_bigger : list of bool
must quality indexes increase
eval_metric : str
name of MXNet evaluation metric
"""
def __init__(self,
model,
optimizer,
data_source,
saver,
ctx,
tc_bigger=[True],
eval_metric='acc',
**kwargs):
super(Trainer, self).__init__(
model,
optimizer,
data_source,
saver)
self.ctx = ctx
self.tc_bigger = tc_bigger
self.eval_metric = eval_metric
def train(self,
num_epoch,
epoch_tail,
**kwargs):
"""
A point of entry for single training procedure.
Parameters:
----------
num_epoch : int
maximal number of training epochs
epoch_tail : int
number of epochs for overfitting detection
"""
self._prepare_train()
super(Trainer, self).train(num_epoch, epoch_tail, **kwargs)
def hyper_train(self,
num_epoch,
epoch_tail,
bo_num_iter,
bo_kappa,
bo_min_rand_num,
bo_results_filename,
synch_file_list=[],
sync_period=5):
"""
A point of entry for multiple training procedure.
Parameters:
----------
num_epoch : int
maximal number of training epochs
epoch_tail : int
number of epochs for overfitting detection
bo_num_iter : int
number of attempts for bayesian optimization
bo_kappa : float
kappa parameter for bayesian optimization
bo_min_rand_num : int
minimal number of random attempts for overfitting detection
bo_results_filename : str
name of file for results of bayesian optimization
        synch_file_list : list of str
            files used to synchronize several instances of hyper optimizers
sync_period : int
number of attempts between synchronizations of several instances of hyper optimizers
"""
self._prepare_train()
super(Trainer, self).hyper_train(
num_epoch,
epoch_tail,
bo_num_iter,
bo_kappa,
bo_min_rand_num,
bo_results_filename,
synch_file_list,
sync_period)
def _hyper_train_target_sub(self, **kwargs):
"""
Calling single training procedure for specific hyper parameters from hyper optimizer.
"""
if self.saver.log_filename:
fh = logging.FileHandler(self.saver.log_filename)
self.logger.addHandler(fh)
self.logger.info("Training with parameters: {}".format(kwargs))
train_controller = TrainController(
checkpoints_filename_prefix=self.saver.model_filename_prefix,
last_checkpoints_dirname=self.saver.last_checkpoints_dirname,
best_checkpoints_dirname=self.saver.best_checkpoints_dirname,
bigger=self.tc_bigger,
score_log_filename=self.saver.score_log_filename,
epoch_tail=self.epoch_tail)
if self.iter is not None:
train_controller.score_log_attempt = self.iter
train_iter, val_iter = self.data_source(**kwargs)
mod = mx.mod.Module(
symbol=self.model(**kwargs),
logger=self.logger,
context=self.ctx)
optimizer = self.optimizer(**kwargs)
batch_end_callback = [
mx.callback.Speedometer(self.data_source.batch_size, 100),
train_controller.get_batch_end_callback()]
try:
mod.fit(
train_data=train_iter,
eval_data=val_iter,
eval_metric=self.eval_metric,
epoch_end_callback=train_controller.get_epoch_end_callback(),
batch_end_callback=batch_end_callback,
eval_end_callback=train_controller.get_score_end_callback(),
optimizer=optimizer,
initializer=mx.init.Xavier(),
arg_params=self.arg_params,
aux_params=self.aux_params,
#force_rebind=True,
#force_init=True,
begin_epoch=self.begin_epoch,
num_epoch=self.num_epoch)
except TrainControllerStopException as e:
self.logger.info(e)
train_controller.log_best_results(logger=self.logger)
if self.saver.log_filename:
self.logger.removeHandler(fh)
fh.close()
best_value = (train_controller.best_eval_metric_values[-1][0] if len(
train_controller.best_eval_metric_values) > 0 else 0.0)
del train_controller
del mod
return best_value
def _prepare_train(self):
"""
Initialization of internal fields before single optimization.
"""
self.begin_epoch = 0
self.arg_params = None
self.aux_params = None
| mit |
alash3al/rethinkdb | external/v8_3.30.33.16/testing/gmock/gtest/test/gtest_break_on_failure_unittest.py | 2140 | 7339 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| agpl-3.0 |
pinusm/Lilach-STIAT | plugins/quest_staircase_init/quest_staircase_init.py | 3 | 3970 | #-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.exceptions import osexception
from libopensesame import plugins, debug
from libopensesame.item import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
Quest = None
try:
import Quest
debug.msg(u'Loading Quest module directly')
except:
debug.msg(u'Failed to load Quest module directly')
if Quest == None:
try:
from psychopy.contrib import quest as Quest
debug.msg(u'Loading Quest module from PsychoPy')
except:
debug.msg(u'Failed to load Quest module from PsychoPy')
if Quest == None:
try:
Quest = plugins.load_mod(__file__, u'Quest')
debug.msg(u'Loading Quest module from plug-in folder')
except:
debug.msg(u'Failed to load Quest module from plug-in folder')
if Quest == None:
raise osexception(u'Failed to load Quest module.')
class quest_staircase_init(item):
"""
desc:
        A plug-in that initializes a Quest staircase.
"""
description = u'Initializes a new Quest staircase procedure'
def reset(self):
"""
desc:
Initialize default variables.
"""
self.t_guess = .5
self.t_guess_sd = .25
self.p_threshold = .75
self.beta = 3.5
self.delta = .01
self.gamma = .5
self.test_value_method = u'quantile'
self.min_test_value = 0
self.max_test_value = 1
self.var_test_value = u'quest_test_value'
def quest_set_next_test_value(self):
"""
desc:
Sets the next test value for the Quest procedure.
"""
if self.get(u'test_value_method') == u'quantile':
self.experiment.quest_test_value = self.experiment.quest.quantile
elif self.get(u'test_value_method') == u'mean':
self.experiment.quest_test_value = self.experiment.quest.mean
elif self.get(u'test_value_method') == u'mode':
self.experiment.quest_test_value = self.experiment.quest.mode
else:
raise osexception(
u'Unknown test_value_method \'%s\' in quest_staircase_init' \
% self.get(u'test_value_method'))
test_value = max(self.get(u'min_test_value'), min(
self.get(u'max_test_value'), self.experiment.quest_test_value()))
debug.msg(u'quest_test_value = %s' % test_value)
self.experiment.set(u'quest_test_value', test_value)
self.experiment.set(self.get(u'var_test_value'), test_value)
def prepare(self):
"""
desc:
Prepares the plug-in.
"""
self.experiment.quest = Quest.QuestObject(self.get(u't_guess'),
self.get(u't_guess_sd'), self.get(u'p_threshold'),
self.get(u'beta'), self.get(u'delta'), self.get(u'gamma'))
self.experiment.quest_set_next_test_value = \
self.quest_set_next_test_value
self.experiment.quest_set_next_test_value()
def var_info(self):
"""
desc:
Gives a list of dictionaries with variable descriptions.
returns:
desc: A list of (name, description) tuples.
type: list
"""
return item.var_info(self) + [(u'quest_test_value',
u'(Determined by Quest procedure)')]
class qtquest_staircase_init(quest_staircase_init, qtautoplugin):
"""
desc:
The GUI part of the plug-in. Controls are defined in info.json.
"""
def __init__(self, name, experiment, script=None):
"""
desc:
Constructor.
arguments:
name: The name of the plug-in.
experiment: The experiment object.
keywords:
script: A definition script.
"""
quest_staircase_init.__init__(self, name, experiment, script)
qtautoplugin.__init__(self, __file__)
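# --- Illustrative sketch (not part of the plug-in) ---
# Outside OpenSesame, the same Quest object can drive a plain staircase
# loop. quantile()/mean()/mode() match the calls used above; update() is
# assumed to take the tested intensity and the observer's response (0/1),
# and run_trial() is a placeholder for real experiment code:
#
#     q = Quest.QuestObject(.5, .25, .75, 3.5, .01, .5)
#     for trial in range(40):
#         intensity = q.quantile()
#         response = run_trial(intensity)
#         q.update(intensity, response)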
| gpl-3.0 |
holyangel/M9 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
        avg = (avg * count + value) / (count + 1)  # true running mean
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/site-packages/pypm/external/2/sqlalchemy/engine/ddl.py | 3 | 5234 | # engine/ddl.py
# Copyright (C) 2009-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle CREATE/DROP workflow."""
from sqlalchemy import engine, schema
from sqlalchemy.sql import util as sql_util
class DDLBase(schema.SchemaVisitor):
def __init__(self, connection):
self.connection = connection
class SchemaGenerator(DDLBase):
def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs):
super(SchemaGenerator, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables and set(tables) or None
self.preparer = dialect.identifier_preparer
self.dialect = dialect
def _can_create(self, table):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or not self.dialect.has_table(self.connection, table.name, schema=table.schema)
def visit_metadata(self, metadata):
if self.tables:
tables = self.tables
else:
tables = metadata.tables.values()
collection = [t for t in sql_util.sort_tables(tables) if self._can_create(t)]
for listener in metadata.ddl_listeners['before-create']:
listener('before-create', metadata, self.connection, tables=collection)
for table in collection:
self.traverse_single(table, create_ok=True)
for listener in metadata.ddl_listeners['after-create']:
listener('after-create', metadata, self.connection, tables=collection)
def visit_table(self, table, create_ok=False):
if not create_ok and not self._can_create(table):
return
for listener in table.ddl_listeners['before-create']:
listener('before-create', table, self.connection)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
self.connection.execute(schema.CreateTable(table))
if hasattr(table, 'indexes'):
for index in table.indexes:
self.traverse_single(index)
for listener in table.ddl_listeners['after-create']:
listener('after-create', table, self.connection)
def visit_sequence(self, sequence):
if self.dialect.supports_sequences:
if ((not self.dialect.sequences_optional or
not sequence.optional) and
(not self.checkfirst or
not self.dialect.has_sequence(self.connection, sequence.name, schema=sequence.schema))):
self.connection.execute(schema.CreateSequence(sequence))
def visit_index(self, index):
self.connection.execute(schema.CreateIndex(index))
class SchemaDropper(DDLBase):
def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs):
super(SchemaDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
self.tables = tables
self.preparer = dialect.identifier_preparer
self.dialect = dialect
def visit_metadata(self, metadata):
if self.tables:
tables = self.tables
else:
tables = metadata.tables.values()
collection = [t for t in reversed(sql_util.sort_tables(tables)) if self._can_drop(t)]
for listener in metadata.ddl_listeners['before-drop']:
listener('before-drop', metadata, self.connection, tables=collection)
for table in collection:
self.traverse_single(table, drop_ok=True)
for listener in metadata.ddl_listeners['after-drop']:
listener('after-drop', metadata, self.connection, tables=collection)
def _can_drop(self, table):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or self.dialect.has_table(self.connection, table.name, schema=table.schema)
def visit_index(self, index):
self.connection.execute(schema.DropIndex(index))
def visit_table(self, table, drop_ok=False):
if not drop_ok and not self._can_drop(table):
return
for listener in table.ddl_listeners['before-drop']:
listener('before-drop', table, self.connection)
for column in table.columns:
if column.default is not None:
self.traverse_single(column.default)
self.connection.execute(schema.DropTable(table))
for listener in table.ddl_listeners['after-drop']:
listener('after-drop', table, self.connection)
def visit_sequence(self, sequence):
if self.dialect.supports_sequences:
if ((not self.dialect.sequences_optional or
not sequence.optional) and
(not self.checkfirst or
self.dialect.has_sequence(self.connection, sequence.name, schema=sequence.schema))):
self.connection.execute(schema.DropSequence(sequence))
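# Illustrative sketch (not part of the original module): these visitors
# are normally driven by the engine behind metadata.create_all()/
# drop_all(), not instantiated by hand. A direct traversal (an assumption
# for illustration, not a public API) would look roughly like:
#
#     gen = SchemaGenerator(engine.dialect, connection, checkfirst=True)
#     gen.traverse_single(metadata)   # dispatches to visit_metadata()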
| mit |
abstract-open-solutions/account-financial-tools | account_reversal/wizard/account_move_reverse.py | 31 | 4847 | # -*- coding: utf-8 -*-
##############################################################################
#
# Account reversal module for OpenERP
# Copyright (C) 2011 Akretion (http://www.akretion.com). All Rights Reserved
# @author Alexis de Lattre <alexis.delattre@akretion.com>
# Copyright (c) 2012-2013 Camptocamp SA (http://www.camptocamp.com)
# @author Guewen Baconnier
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class account_move_reversal(orm.TransientModel):
_name = "account.move.reverse"
_description = "Create reversal of account moves"
_columns = {
'date': fields.date(
'Reversal Date',
required=True,
help="Enter the date of the reversal account entries. "
"By default, OpenERP proposes the first day of "
"the next period."),
'period_id': fields.many2one(
'account.period',
'Reversal Period',
help="If empty, take the period of the date."),
'journal_id': fields.many2one(
'account.journal',
'Reversal Journal',
help='If empty, uses the journal of the journal entry '
'to be reversed.'),
'move_prefix': fields.char(
'Entries Ref. Prefix',
help="Prefix that will be added to the 'Ref' of the journal "
"entry to be reversed to create the 'Ref' of the "
"reversal journal entry (no space added after the prefix)."),
'move_line_prefix': fields.char(
'Items Name Prefix',
help="Prefix that will be added to the name of the journal "
"item to be reversed to create the name of the reversal "
"journal item (a space is added after the prefix)."),
}
def _next_period_first_date(self, cr, uid, context=None):
if context is None:
context = {}
res = False
period_ctx = context.copy()
period_ctx['account_period_prefer_normal'] = True
period_obj = self.pool.get('account.period')
today_period_id = period_obj.find(cr, uid, context=period_ctx)
if today_period_id:
today_period = period_obj.browse(
cr, uid, today_period_id[0], context=context)
next_period_id = period_obj.next(
cr, uid, today_period, 1, context=context)
if next_period_id:
next_period = period_obj.browse(
cr, uid, next_period_id, context=context)
res = next_period.date_start
return res
_defaults = {
'date': _next_period_first_date,
'move_line_prefix': 'REV -',
}
def action_reverse(self, cr, uid, ids, context=None):
if context is None:
context = {}
assert 'active_ids' in context, "active_ids missing in context"
form = self.read(cr, uid, ids, context=context)[0]
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
move_obj = self.pool.get('account.move')
move_ids = context['active_ids']
period_id = form['period_id'][0] if form.get('period_id') else False
journal_id = form['journal_id'][0] if form.get('journal_id') else False
reversed_move_ids = move_obj.create_reversals(
cr, uid,
move_ids,
form['date'],
reversal_period_id=period_id,
reversal_journal_id=journal_id,
move_prefix=form['move_prefix'],
move_line_prefix=form['move_line_prefix'],
context=context)
__, action_id = mod_obj.get_object_reference(
cr, uid, 'account', 'action_move_journal_line')
action = act_obj.read(cr, uid, [action_id], context=context)[0]
action['domain'] = unicode([('id', 'in', reversed_move_ids)])
action['name'] = _('Reversal Entries')
action['context'] = unicode({'search_default_to_be_reversed': 0})
return action
| agpl-3.0 |
csachs/openmicroscopy | components/tools/OmeroWeb/test/unit/test_webgateway.py | 15 | 17881 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
import time
import os
import pytest
from webgateway.webgateway_cache import FileCache, WebGatewayCache
from webgateway.webgateway_cache import WebGatewayTempFile
import omero.gateway
class TestHelperObjects(object):
def testColorHolder(self):
ColorHolder = omero.gateway.ColorHolder
c1 = ColorHolder()
assert c1._color == {'red': 0, 'green': 0, 'blue': 0, 'alpha': 255}
c1 = ColorHolder('blue')
assert c1.getHtml() == '0000FF'
assert c1.getCss() == 'rgba(0,0,255,1.000)'
assert c1.getRGB() == (0, 0, 255)
c1.setRed(0xF0)
assert c1.getCss() == 'rgba(240,0,255,1.000)'
c1.setGreen(0x0F)
assert c1.getCss() == 'rgba(240,15,255,1.000)'
c1.setBlue(0)
assert c1.getCss() == 'rgba(240,15,0,1.000)'
c1.setAlpha(0x7F)
assert c1.getCss() == 'rgba(240,15,0,0.498)'
c1 = ColorHolder.fromRGBA(50, 100, 200, 300)
assert c1.getCss() == 'rgba(50,100,200,1.000)'
def testOmeroType(self):
omero_type = omero.gateway.omero_type
assert isinstance(omero_type('rstring'), omero.RString)
assert isinstance(omero_type(u'rstring'), omero.RString)
assert isinstance(omero_type(1), omero.RInt)
assert isinstance(omero_type(1L), omero.RLong)
assert isinstance(omero_type(False), omero.RBool)
assert isinstance(omero_type(True), omero.RBool)
assert not isinstance(omero_type((1, 2, 'a')), omero.RType)
def testSplitHTMLColor(self):
splitHTMLColor = omero.gateway.splitHTMLColor
assert splitHTMLColor('abc') == [0xAA, 0xBB, 0xCC, 0xFF]
assert splitHTMLColor('abcd') == [0xAA, 0xBB, 0xCC, 0xDD]
assert splitHTMLColor('abbccd') == [0xAB, 0xBC, 0xCD, 0xFF]
assert splitHTMLColor('abbccdde') == [0xAB, 0xBC, 0xCD, 0xDE]
assert splitHTMLColor('#$%&%') is None
def _testCacheFSBlockSize(cache):
cache.wipe()
c1 = cache._du()
cache.set('test/1', 'a')
c2 = cache._du()
cache.wipe()
return c1, c2-c1
class TestFileCache(object):
@pytest.fixture(autouse=True)
def setUp(self, request):
def fin():
os.system('rm -fr test_cache')
request.addfinalizer(fin)
self.cache = FileCache('test_cache')
def testTimeouts(self):
        assert self.cache.get('date/test/1') is None, \
            'Key already exists in cache'
self.cache.set('date/test/1', '1', timeout=3)
assert self.cache.get('date/test/1') == '1', 'Key not properly cached'
time.sleep(4)
assert self.cache.get('date/test/1') is None, 'Timeout failed'
# if _default_timeout is 0, timeouts are simply not checked
self.cache.wipe()
self.cache._default_timeout = 0
        assert self.cache.get('date/test/1') is None, \
            'Key already exists in cache'
        self.cache.set('date/test/1', '1', timeout=3)
        assert self.cache.get('date/test/1') == '1', \
            'Key not properly cached'
        time.sleep(4)
        assert self.cache.has_key('date/test/1')  # noqa
        assert self.cache.get('date/test/1') == '1', \
            'Key got timed out and should not have'
def testMaxSize(self):
empty_size, cache_block = _testCacheFSBlockSize(self.cache)
self.cache._max_size = empty_size + 4*cache_block + 1
# There is an overhead (8 bytes in my system) for timestamp per file,
# and the limit is only enforced after we cross over it
for i in range(6):
self.cache.set('date/test/%d' % i, 'abcdefgh'*127*cache_block)
for i in range(4):
            assert self.cache.get('date/test/%d' % i) == \
                'abcdefgh' * 127 * cache_block, \
                'Key %d not properly cached' % i
assert self.cache.get('date/test/5') is None, 'Size limit failed'
self.cache._max_size = 0
self.cache.wipe()
for i in range(6):
self.cache.set('date/test/%d' % i, 'abcdefgh'*127*cache_block)
for i in range(6):
            assert self.cache.get('date/test/%d' % i) == \
                'abcdefgh' * 127 * cache_block, \
                'Key %d not properly cached' % i
def testMaxEntries(self):
self.cache._max_entries = 2
self.cache.set('date/test/1', '1')
self.cache.set('date/test/2', '2')
self.cache.set('date/test/3', '3')
assert self.cache.get('date/test/1') == '1', 'Key not properly cached'
assert self.cache.get('date/test/2') == '2', 'Key not properly cached'
        assert self.cache.get('date/test/3') is None, \
            'File number limit failed'
self.cache.wipe()
self.cache._max_entries = 0
self.cache.set('date/test/1', '1')
self.cache.set('date/test/2', '2')
self.cache.set('date/test/3', '3')
assert self.cache.get('date/test/1') == '1', 'Key not properly cached'
assert self.cache.get('date/test/2') == '2', 'Key not properly cached'
assert self.cache.get('date/test/3') == '3', 'Key not properly cached'
def testPurge(self):
self.cache._max_entries = 2
self.cache._default_timeout = 3
self.cache.set('date/test/1', '1')
self.cache.set('date/test/2', '2')
self.cache.set('date/test/3', '3')
assert self.cache.get('date/test/1') == '1', 'Key not properly cached'
assert self.cache.get('date/test/2') == '2', 'Key not properly cached'
        assert self.cache.get('date/test/3') is None, \
            'File number limit failed'
time.sleep(4)
self.cache.set('date/test/3', '3')
assert self.cache.get('date/test/3') == '3', 'Purge not working'
def testOther(self):
# set should only accept strings as values
pytest.raises(ValueError, self.cache.set, 'date/test/1', 123)
# keys can't have .. or start with /
pytest.raises(ValueError, self.cache.set, '/date/test/1', '1')
pytest.raises(ValueError, self.cache.set, 'date/test/../1', '1')
# get some test data in
self.cache.set('date/test/1', '1')
self.cache.set('date/test/2', '2')
self.cache.set('date/test/3', '3')
assert self.cache.get('date/test/1') == '1', 'Key not properly cached'
assert self.cache.get('date/test/2') == '2', 'Key not properly cached'
assert self.cache.get('date/test/3') == '3', 'Key not properly cached'
# check has_key
assert self.cache.has_key('date/test/1') # noqa
assert not self.cache.has_key('date/test/bogus') # noqa
# assert wipe() nukes the whole thing
assert self.cache._num_entries == 3
self.cache.wipe()
assert self.cache._num_entries == 0
class TestWebGatewayCacheTempFile(object):
@pytest.fixture(autouse=True)
def setUp(self, request):
def fin():
os.system('rm -fr test_cache')
request.addfinalizer(fin)
self.tmpfile = WebGatewayTempFile(tdir='test_cache')
def testFilenameSize(self):
"""
Make sure slashes, dashes, underscores and other chars don't mess
things up.
Also check for filename size limits.
"""
fname = '1/2_3!"\'#$%&()=@€£‰¶÷[]≠§±+*~^\,.;:'
try:
fpath, rpath, fobj = self.tmpfile.new(fname, key='specialchars')
except:
            pytest.fail('WebGatewayTempFile.new not handling special'
                        ' characters properly')
# ext2/3/4 limit is 255 bytes, most others are equal to or larger
fname = "a"*384
try:
fpath, rpath, fobj = self.tmpfile.new(fname, key='longname')
fobj.close()
# is it keeping extensions properly?
fpath, rpath, fobj = self.tmpfile.new(
"1" + fname + '.tif', key='longname')
fobj.close()
assert fpath[-5:] == 'a.tif'
fpath, rpath, fobj = self.tmpfile.new(
"2" + fname + '.ome.tiff', key='longname')
fobj.close()
assert fpath[-10:] == 'a.ome.tiff'
fpath, rpath, fobj = self.tmpfile.new(
"3" + fname + 'ome.tiff', key='longname')
fobj.close()
assert fpath[-6:] == 'a.tiff'
fpath, rpath, fobj = self.tmpfile.new(
"4" + fname + 'somethingverylong.zip', key='longname')
fobj.close()
assert fpath[-5:] == 'a.zip'
fpath, rpath, fobj = self.tmpfile.new(
"5" + fname + '.tif.somethingverylong', key='longname')
fobj.close()
assert fpath[-5:] == 'aaaaa'
except:
pytest.fail('WebGatewayTempFile.new not handling long file names'
' properly')
class TestWebGatewayCache(object):
@pytest.fixture(autouse=True)
def setUp(self, request):
def fin():
os.system('rm -fr test_cache')
request.addfinalizer(fin)
self.wcache = WebGatewayCache(backend=FileCache, basedir='test_cache')
class r:
def __init__(self):
self.REQUEST = {'c': '1|292:1631$FF0000,2|409:5015$0000FF',
'm': 'c', 'q': '0.9'}
def new(self, q):
rv = self.__class__()
rv.REQUEST.update(q)
return rv
self.request = r()
def testCacheSettings(self):
uid = 123
# empty_size, cache_block =
# _testCacheFSBlockSize(self.wcache._thumb_cache)
self.wcache._updateCacheSettings(self.wcache._thumb_cache, timeout=2,
max_entries=5, max_size=0)
cachestr = 'abcdefgh' * 127
self.wcache._thumb_cache.wipe()
for i in range(6):
self.wcache.setThumb(self.request, 'test', uid, i, cachestr)
max_size = self.wcache._thumb_cache._du()
self.wcache._updateCacheSettings(self.wcache._thumb_cache, timeout=2,
max_entries=5, max_size=max_size)
self.wcache._thumb_cache.wipe()
for i in range(6):
self.wcache.setThumb(self.request, 'test', uid, i, cachestr)
for i in range(4):
            assert self.wcache.getThumb(self.request, 'test', uid, i) == \
                cachestr, 'Key %d not properly cached' % i
        assert self.wcache.getThumb(self.request, 'test', uid, 5) is None, \
            'Size limit failed'
        for i in range(10):
            self.wcache.setThumb(self.request, 'test', uid, i, 'abcdefgh')
        for i in range(5):
            assert self.wcache.getThumb(self.request, 'test', uid, i) == \
                'abcdefgh', 'Key %d not properly cached' % i
        assert self.wcache.getThumb(self.request, 'test', uid, 5) is None, \
            'Entries limit failed'
        time.sleep(2)
        assert self.wcache.getThumb(self.request, 'test', uid, 0) is None, \
            'Time limit failed'
def testThumbCache(self):
uid = 123
assert self.wcache.getThumb(self.request, 'test', uid, 1) is None
self.wcache.setThumb(self.request, 'test', uid, 1, 'thumbdata')
        assert self.wcache.getThumb(self.request, 'test', uid, 1) == \
            'thumbdata', 'Thumb not properly cached (%s)' % \
            self.wcache.getThumb(self.request, 'test', uid, 1)
self.wcache.clearThumb(self.request, 'test', uid, 1)
assert self.wcache.getThumb(self.request, 'test', uid, 1) is None
# Make sure clear() nukes this
self.wcache.setThumb(self.request, 'test', uid, 1, 'thumbdata')
assert self.wcache.getThumb(self.request, 'test', uid, 1) == 'thumbdata', \
    'Thumb not properly cached'
assert self.wcache._thumb_cache._num_entries != 0
self.wcache.clear()
assert self.wcache._thumb_cache._num_entries == 0
def testImageCache(self):
uid = 123
# Also add a thumb, a split channel and a projection, as it should get
# deleted with image
preq = self.request.new({'p': 'intmax'})
assert self.wcache.getThumb(self.request, 'test', uid, 1) is None
self.wcache.setThumb(self.request, 'test', uid, 1, 'thumbdata')
assert (self.wcache.getThumb(self.request, 'test', uid, 1) ==
'thumbdata')
img = omero.gateway.ImageWrapper(None, omero.model.ImageI(1, False))
assert self.wcache.getImage(self.request, 'test', img, 2, 3) is None
self.wcache.setImage(self.request, 'test', img, 2, 3, 'imagedata')
assert (self.wcache.getImage(self.request, 'test', img, 2, 3) ==
'imagedata')
assert self.wcache.getImage(preq, 'test', img, 2, 3) is None
self.wcache.setImage(preq, 'test', img, 2, 3, 'imagedata')
assert self.wcache.getImage(preq, 'test', img, 2, 3) == 'imagedata'
assert (self.wcache.getSplitChannelImage(self.request, 'test', img, 2,
3) is None)
self.wcache.setSplitChannelImage(self.request, 'test', img, 2, 3,
'imagedata')
assert (self.wcache.getSplitChannelImage(self.request, 'test', img, 2,
3) == 'imagedata')
self.wcache.clearImage(self.request, 'test', uid, img)
assert self.wcache.getImage(self.request, 'test', img, 2, 3) is None
assert (self.wcache.getSplitChannelImage(self.request, 'test', img, 2,
3) is None)
assert self.wcache.getImage(preq, 'test', img, 2, 3) is None
assert self.wcache.getThumb(self.request, 'test', uid, 1) is None
# The exact same behaviour, using invalidateObject
self.wcache.setThumb(self.request, 'test', uid, 1, 'thumbdata')
assert (self.wcache.getThumb(self.request, 'test', uid, 1) ==
'thumbdata')
self.wcache.setImage(self.request, 'test', img, 2, 3, 'imagedata')
assert (self.wcache.getImage(self.request, 'test', img, 2, 3) ==
'imagedata')
assert self.wcache.getImage(preq, 'test', img, 2, 3) is None
self.wcache.setImage(preq, 'test', img, 2, 3, 'imagedata')
assert self.wcache.getImage(preq, 'test', img, 2, 3) == 'imagedata'
assert (self.wcache.getSplitChannelImage(self.request, 'test', img, 2,
3) is None)
self.wcache.setSplitChannelImage(self.request, 'test', img, 2, 3,
'imagedata')
assert (self.wcache.getSplitChannelImage(self.request, 'test', img, 2,
3) == 'imagedata')
self.wcache.invalidateObject('test', uid, img)
assert self.wcache.getImage(self.request, 'test', img, 2, 3) is None
assert (self.wcache.getSplitChannelImage(self.request, 'test', img, 2,
3) is None)
assert self.wcache.getImage(preq, 'test', img, 2, 3) is None
assert self.wcache.getThumb(self.request, 'test', uid, 1) is None
# Make sure clear() nukes this
assert self.wcache.getImage(self.request, 'test', img, 2, 3) is None
self.wcache.setImage(self.request, 'test', img, 2, 3, 'imagedata')
assert (self.wcache.getImage(self.request, 'test', img, 2, 3) ==
'imagedata')
assert self.wcache._img_cache._num_entries != 0
self.wcache.clear()
assert self.wcache._img_cache._num_entries == 0
def testLocks(self):
wcache2 = WebGatewayCache(backend=FileCache,
basedir=self.wcache._basedir)
# wcache2 will hold the lock
assert wcache2.tryLock()
assert not self.wcache.tryLock()
assert wcache2.tryLock()
del wcache2
# The lock should have been removed
assert self.wcache.tryLock()
def testJsonCache(self):
uid = 123
ds = omero.gateway.DatasetWrapper(None, omero.model.DatasetI(1,
False))
assert (self.wcache.getDatasetContents(self.request, 'test', ds) is
None)
self.wcache.setDatasetContents(self.request, 'test', ds,
'datasetdata')
assert (self.wcache.getDatasetContents(self.request, 'test', ds) ==
'datasetdata')
self.wcache.clearDatasetContents(self.request, 'test', ds)
assert (self.wcache.getDatasetContents(self.request, 'test', ds) is
None)
# The exact same behaviour, using invalidateObject
assert (self.wcache.getDatasetContents(self.request, 'test', ds) is
None)
self.wcache.setDatasetContents(self.request, 'test', ds,
'datasetdata')
assert (self.wcache.getDatasetContents(self.request, 'test', ds) ==
'datasetdata')
self.wcache.invalidateObject('test', uid, ds)
assert (self.wcache.getDatasetContents(self.request, 'test', ds) is
None)
# Make sure clear() nukes this
assert (self.wcache.getDatasetContents(self.request, 'test', ds) is
None)
self.wcache.setDatasetContents(self.request, 'test', ds,
'datasetdata')
assert (self.wcache.getDatasetContents(self.request, 'test', ds) ==
'datasetdata')
assert self.wcache._json_cache._num_entries != 0
self.wcache.clear()
assert self.wcache._json_cache._num_entries == 0
| gpl-2.0 |
nagyistoce/OpenBird | cocos2d/plugin/tools/toolsForGame/modifyProject.py | 255 | 1300 | import sys, string, os
from xml.etree import ElementTree as ET
from xml.dom import minidom

projFile = sys.argv[1]
targetPath = sys.argv[2]

def getLinkElement():
    global targetPath
    ret = ET.Element('link')
    nameEle = ET.Element('name')
    nameEle.text = 'plugin-x'
    typeEle = ET.Element('type')
    typeEle.text = '2'
    locEle = ET.Element('locationURI')
    locEle.text = targetPath
    ret.append(nameEle)
    ret.append(typeEle)
    ret.append(locEle)
    return ret

tree = ET.parse(projFile)
root = tree.getroot()
nodeLinkRes = root.find('linkedResources')
if nodeLinkRes is not None:
    linkNodes = nodeLinkRes.findall('link')
    haveTarget = False
    if linkNodes is not None and len(linkNodes) > 0:
        for node in linkNodes:
            locNode = node.find('locationURI')
            if locNode is None:
                continue
            tempText = locNode.text
            tempText = tempText.strip(' \n\r\t')
            if tempText == targetPath:
                haveTarget = True
                break
    if not haveTarget:
        nodeLinkRes.append(getLinkElement())
        tree.write(projFile, 'UTF-8')
else:
    linkResEle = ET.Element('linkedResources')
    linkResEle.append(getLinkElement())
    root.append(linkResEle)
    tree.write(projFile, 'UTF-8')
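# Illustrative only (assumes targetPath was set from sys.argv above): what
# getLinkElement() serializes to -- an Eclipse linked-resource entry pointing
# the project at the plugin-x sources.
def _demo_link_element():
    return ET.tostring(getLinkElement())
    # e.g. '<link><name>plugin-x</name><type>2</type><locationURI>...</locationURI></link>'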
| mit |
taoger/titanium_mobile | support/android/builder.py | 30 | 97623 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Appcelerator Titanium Mobile
# Copyright (c) 2011-2012 by Appcelerator, Inc. All Rights Reserved.
# Licensed under the terms of the Apache Public License
# Please see the LICENSE included with this distribution for details.
#
# General builder script for staging, packaging, deploying,
# and debugging Titanium Mobile applications on Android
#
import os, sys, subprocess, shutil, time, signal, string, platform, re, glob, hashlib, imp, inspect
import run, avd, prereq, zipfile, tempfile, fnmatch, codecs, traceback, sgmllib
from os.path import splitext
from compiler import Compiler
from os.path import join, splitext, split, exists
from shutil import copyfile
from xml.dom.minidom import parseString
from tilogger import *
from datetime import datetime, timedelta
reload(sys) # this is required to prevent the following error: "AttributeError: 'module' object has no attribute 'setdefaultencoding'"
sys.setdefaultencoding("utf_8") # Fix umlaut issues
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
top_support_dir = os.path.dirname(template_dir)
sys.path.append(top_support_dir)
sys.path.append(os.path.join(top_support_dir, 'common'))
sys.path.append(os.path.join(top_support_dir, 'module'))
import simplejson, java
from mako.template import Template
from tiapp import *
from android import Android
from androidsdk import AndroidSDK
from deltafy import Deltafy, Delta
from css import csscompiler
from module import ModuleDetector
import localecompiler
import fastdev
import requireIndex
resourceFiles = ['strings.xml', 'attrs.xml', 'styles.xml', 'bools.xml', 'colors.xml',
'dimens.xml', 'ids.xml', 'integers.xml', 'arrays.xml']
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];
ignoreDirs = ['.git','.svn','_svn', 'CVS'];
android_avd_hw = {'hw.camera': 'yes', 'hw.gps':'yes'}
res_skips = ['style']
log = None
# Copied from frameworks/base/tools/aapt/Package.cpp
uncompressed_types = [
".jpg", ".jpeg", ".png", ".gif",
".wav", ".mp2", ".mp3", ".ogg", ".aac",
".mpg", ".mpeg", ".mid", ".midi", ".smf", ".jet",
".rtttl", ".imy", ".xmf", ".mp4", ".m4a",
".m4v", ".3gp", ".3gpp", ".3g2", ".3gpp2",
".amr", ".awb", ".wma", ".wmv"
]
# Java keywords to reference in case app id contains java keyword
java_keywords = [
"abstract", "continue", "for", "new", "switch",
"assert", "default", "goto", "package", "synchronized",
"boolean", "do", "if", "private", "this",
"break", "double", "implements", "protected", "throw",
"byte", "else", "import", "public", "throws",
"case", "enum", "instanceof", "return", "transient",
"catch", "extends", "int", "short", "try",
"char", "final", "interface", "static", "void",
"class", "finally", "long", "strictfp", "volatile",
"const", "float", "native", "super", "while",
"true", "false", "null"
]
MIN_API_LEVEL = 10
HONEYCOMB_MR2_LEVEL = 13
KNOWN_ABIS = ("armeabi", "armeabi-v7a", "x86")
# Used only to find <script> tags in HTML files
# so we can be sure to package referenced JS files
# even when compiling for production. (See
# Builder.package_and_deploy later in this file.)
class HTMLParser(sgmllib.SGMLParser):
def parse(self, html_source):
self.feed(html_source)
self.close()
def __init__(self, verbose=0):
sgmllib.SGMLParser.__init__(self, verbose)
self.referenced_js_files = []
def start_script(self, attributes):
for name, value in attributes:
if value and name.lower() == "src":
self.referenced_js_files.append(value.lower())
def get_referenced_js_files(self):
return self.referenced_js_files
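# Quick sketch (illustrative input) of the scanner above on an inline HTML
# snippet; note the values come back lowercased.
def _demo_html_script_scan():
    parser = HTMLParser()
    parser.parse('<html><head><script src="App.js"></script></head></html>')
    return parser.get_referenced_js_files()  # ['app.js']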
def launch_logcat():
valid_device_switches = ('-e', '-d', '-s')
device_id = None
android_sdk_location = None
adb_location = None
logcat_process = None
device_switch = None # e.g., -e or -d or -s
def show_usage():
print >> sys.stderr, ""
print >> sys.stderr, "%s devicelog <sdk_dir> <device_switch> [device_serial_number]" % os.path.basename(sys.argv[0])
print >> sys.stderr, ""
print >> sys.stderr, "The <device_switch> can be -e, -d -s. If -s, also pass serial number."
sys.exit(1)
if len(sys.argv) < 3:
print >> sys.stderr, "Missing Android SDK location."
show_usage()
else:
android_sdk_location = os.path.abspath(os.path.expanduser(sys.argv[2]))
adb_location = AndroidSDK(android_sdk_location).get_adb()
if len(sys.argv) < 4:
print >> sys.stderr, "Missing device/emulator switch (e.g., -e, -d, -s)."
show_usage()
device_switch = sys.argv[3]
if device_switch not in valid_device_switches:
print >> sys.stderr, "Unknown device type switch: %s" % device_switch
show_usage()
if device_switch == "-s":
if len(sys.argv) < 5:
print >> sys.stderr, "Must specify serial number when using -s."
show_usage()
else:
device_id = sys.argv[4]
# For killing the logcat process if our process gets killed.
def signal_handler(signum, frame):
print "[DEBUG] Signal %s received. Terminating the logcat process." % signum
if logcat_process is not None:
if platform.system() == "Windows":
os.system("taskkill /F /T /PID %i" % logcat_process.pid)
else:
os.kill(logcat_process.pid, signal.SIGTERM)
# make sure adb is running on windows, else XP can lockup the python
# process when adb runs first time
if platform.system() == "Windows":
run.run([adb_location, "start-server"], True, ignore_output=True)
logcat_cmd = [adb_location, device_switch]
if device_id:
logcat_cmd.append(device_id)
logcat_cmd.extend(["logcat", "-s", "*:d,*,TiAPI:V"])
logcat_process = subprocess.Popen(logcat_cmd)
if platform.system() != "Windows":
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGABRT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# In case it's gonna exit early (like if the command line
# was wrong or something) give it a chance to do so before we start
# waiting on it.
time.sleep(1)
return_code = logcat_process.poll()
if return_code:
signal_handler(signal.SIGQUIT, None)
sys.exit(return_code)
# Now wait for it.
try:
return_code = logcat_process.wait()
except OSError:
signal_handler(signal.SIGQUIT, None)
sys.exit(return_code)
sys.exit(return_code)
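# Illustrative helper (not part of the original script): the cross-platform
# terminate idiom the signal handlers above rely on -- taskkill on Windows,
# SIGTERM elsewhere.
def _demo_terminate(proc):
    import os, platform, signal
    if platform.system() == "Windows":
        os.system("taskkill /F /T /PID %i" % proc.pid)
    else:
        os.kill(proc.pid, signal.SIGTERM)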
def render_template_with_tiapp(template_text, tiapp_obj):
t = Template(template_text)
return t.render(tiapp=tiapp_obj)
def remove_ignored_dirs(dirs):
    # Prune in place so os.walk sees the filtered list; removing entries
    # while iterating over the same list would skip adjacent ignored dirs.
    dirs[:] = [d for d in dirs if d not in ignoreDirs]
# ZipFile.extractall introduced in Python 2.6, so this is workaround for earlier
# versions
def zip_extractall(zfile, target_dir):
file_infos = zfile.infolist()
for info in file_infos:
if info.file_size > 0:
file_path = os.path.join(target_dir, os.path.normpath(info.filename))
parent_path = os.path.dirname(file_path)
if not os.path.exists(parent_path):
os.makedirs(parent_path)
out_file = open(file_path, "wb")
out_file.write(zfile.read(info.filename))
out_file.close()
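# Minimal usage sketch (the archive name is hypothetical): extract every
# non-empty entry of a zip into a target directory, pre-Python-2.6 style.
def _demo_zip_extractall():
    import zipfile
    zf = zipfile.ZipFile('bundle.zip')  # hypothetical archive
    zip_extractall(zf, 'extracted')
    zf.close()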
def dequote(s):
if s[0:1] == '"':
return s[1:-1]
return s
def pipe(args1,args2):
p1 = subprocess.Popen(args1, stdout=subprocess.PIPE)
p2 = subprocess.Popen(args2, stdin=p1.stdout, stdout=subprocess.PIPE)
return p2.communicate()[0]
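# Illustrative usage (assumed commands): feed one process's stdout into
# another's stdin, the same way pipe() is later used to feed input.py
# keystrokes into `android create avd`.
def _demo_pipe():
    return pipe(['echo', 'hello world'], ['grep', 'hello'])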
def read_properties(propFile, separator=":= "):
propDict = dict()
for propLine in propFile:
propDef = propLine.strip()
if len(propDef) == 0:
continue
if propDef[0] in ( '!', '#' ):
continue
punctuation = [propDef.find(c) for c in separator] + [len(propDef)]
found = min([pos for pos in punctuation if pos != -1])
name = propDef[:found].rstrip()
value = propDef[found:].lstrip(separator).rstrip()
propDict[name] = value
propFile.close()
return propDict
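# Sketch (illustrative input) of read_properties on an in-memory Java-style
# .properties file; any of ':', '=' or a blank ends the key.
def _demo_read_properties():
    from StringIO import StringIO
    sample = StringIO("# comment\nname = Titanium\nsdk.dir:/opt/android\n")
    return read_properties(sample)
    # {'name': 'Titanium', 'sdk.dir': '/opt/android'}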
def info(msg):
log.info(msg)
def debug(msg):
log.debug(msg)
def warn(msg):
log.warn(msg)
def trace(msg):
log.trace(msg)
def error(msg):
log.error(msg)
def copy_all(source_folder, dest_folder, mergeXMLResources=False, ignore_dirs=[], ignore_files=[], ignore_exts=[], one_time_msg=""):
    msg_shown = False
    for root, dirs, files in os.walk(source_folder, True, None, True):
        # prune in place; removing while iterating skips entries
        dirs[:] = [d for d in dirs if d not in ignore_dirs]
        for f in files:
            if f in ignore_files:
                continue
            ext = os.path.splitext(f)[1]
            if ext in ignore_exts:
                continue
            if one_time_msg and not msg_shown:
                info(one_time_msg)
                msg_shown = True
            from_ = os.path.join(root, f)
            to_ = from_.replace(source_folder, dest_folder, 1)
            to_directory = os.path.split(to_)[0]
            if not os.path.exists(to_directory):
                os.makedirs(to_directory)
                shutil.copyfile(from_, to_)
            #
            # Merge the xml resource files in res/values/ if there are
            # multiple files with the same name. (TIMOB-12663)
            #
            elif mergeXMLResources and os.path.isfile(to_) and f in resourceFiles:
                sfile = open(from_, 'r')
                dfile = open(to_, 'r')
                scontent = sfile.read()
                dcontent = dfile.read()
                sfile.close()
                dfile.close()
                sindex = scontent.find('</resources>')
                dindex = dcontent.find('>', dcontent.find('<resources')) + 1
                content_to_write = scontent[:sindex] + dcontent[dindex:]
                wfile = open(to_, 'w')
                wfile.write(content_to_write)
                wfile.close()
            else:
                shutil.copyfile(from_, to_)
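# Standalone sketch of the TIMOB-12663 merge above (illustrative strings):
# splice the source file's resource entries in ahead of the destination's,
# keeping a single <resources> wrapper.
def _demo_merge_resources_xml():
    src = '<resources><string name="a">1</string></resources>'
    dst = '<resources><string name="b">2</string></resources>'
    sindex = src.find('</resources>')
    dindex = dst.find('>', dst.find('<resources')) + 1
    return src[:sindex] + dst[dindex:]
    # '<resources><string name="a">1</string><string name="b">2</string></resources>'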
def remove_orphaned_files(source_folder, target_folder, ignore=[]):
is_res = source_folder.endswith('Resources') or source_folder.endswith('Resources' + os.sep)
for root, dirs, files in os.walk(target_folder):
for f in files:
if f in ignore:
continue
full = os.path.join(root, f)
rel = full.replace(target_folder, '')
if rel[0] == os.sep:
rel = rel[1:]
is_orphan = False
if not os.path.exists(os.path.join(source_folder, rel)):
is_orphan = True
# But it could be under android/... too (platform-specific)
if is_orphan and is_res:
if os.path.exists(os.path.join(source_folder, 'android', rel)):
is_orphan = False
if is_orphan:
os.remove(full)
def is_resource_drawable(path):
if re.search("android/images/(high|medium|low|res-[^/]+)/", path.replace(os.sep, "/")):
return True
else:
return False
def resource_drawable_folder(path):
if not is_resource_drawable(path):
return None
else:
pattern = r'/android/images/(high|medium|low|res-[^/]+)/'
match = re.search(pattern, path.replace(os.sep, "/"))
if not match.groups():
return None
folder = match.groups()[0]
if re.match('high|medium|low', folder):
return 'drawable-%sdpi' % folder[0]
else:
return 'drawable-%s' % folder.replace('res-', '')
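# Worked examples (paths are hypothetical) of the density-folder mapping
# above: high/medium/low collapse to the classic dpi buckets, while res-*
# folders pass through with the prefix stripped.
def _demo_resource_drawable_folder():
    assert resource_drawable_folder('Resources/android/images/high/icon.png') == 'drawable-hdpi'
    assert resource_drawable_folder('Resources/android/images/res-long-land-hdpi/bg.png') == 'drawable-long-land-hdpi'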
def remove_duplicate_nodes_in_res_file(full_path, node_names_to_check):
f = open(full_path, 'r')
contents = f.read()
f.close()
doc = parseString(contents)
resources_node = doc.getElementsByTagName('resources')[0]
made_change = False
for node_name in node_names_to_check:
nodes = doc.getElementsByTagName(node_name)
if len(nodes) == 0:
continue
name_list = [] #keeps track of the name attribute for the node we are checking
for node in nodes:
# Only check for the children of the "resources" node
if node.parentNode != resources_node:
continue
name = node.getAttribute('name')
# Remove the node with the duplicate names
if name in name_list:
resources_node.removeChild(node)
made_change = True
debug('Removed duplicate node [%s] from %s' %(name, full_path))
else:
name_list.append(name)
if made_change:
new_contents = doc.toxml()
f = codecs.open(full_path, 'w')
f.write(new_contents)
f.close()
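# Standalone sketch (illustrative XML) of the dedupe rule above: repeated
# nodes with the same name attribute collapse to the first occurrence.
def _demo_dedupe_res_nodes():
    from xml.dom.minidom import parseString
    doc = parseString('<resources><string name="x">1</string>'
                      '<string name="x">2</string></resources>')
    res = doc.getElementsByTagName('resources')[0]
    seen = []
    for node in doc.getElementsByTagName('string'):
        name = node.getAttribute('name')
        if name in seen:
            res.removeChild(node)
        else:
            seen.append(name)
    return doc.toxml()  # keeps only the first <string name="x">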
class Builder(object):
def __init__(self, name, sdk, project_dir, support_dir, app_id, is_emulator):
self.top_dir = project_dir
self.project_tiappxml = os.path.join(self.top_dir,'tiapp.xml')
self.project_dir = os.path.join(project_dir,'build','android')
self.res_dir = os.path.join(self.project_dir,'res')
self.platform_dir = os.path.join(project_dir, 'platform', 'android')
self.project_src_dir = os.path.join(self.project_dir, 'src')
self.project_gen_dir = os.path.join(self.project_dir, 'gen')
self.name = name
self.app_id = app_id
self.support_dir = support_dir
self.compiled_files = []
self.force_rebuild = False
self.debugger_host = None
self.debugger_port = -1
self.profiler_host = None
self.profiler_port = -1
self.fastdev_port = -1
self.fastdev = False
self.compile_js = False
self.tool_api_level = MIN_API_LEVEL
self.abis = list(KNOWN_ABIS)
# don't build if a java keyword in the app id would cause the build to fail
tok = self.app_id.split('.')
for token in tok:
if token in java_keywords:
error("Do not use java keywords for project app id, such as " + token)
sys.exit(1)
tool_api_level_explicit = False
temp_tiapp = TiAppXML(self.project_tiappxml)
if temp_tiapp and temp_tiapp.android:
if 'tool-api-level' in temp_tiapp.android:
self.tool_api_level = int(temp_tiapp.android['tool-api-level'])
tool_api_level_explicit = True
if 'abi' in temp_tiapp.android and temp_tiapp.android['abi'] != 'all':
tiapp_abis = [abi.strip() for abi in temp_tiapp.android['abi'].split(",")]
to_remove = [bad_abi for bad_abi in tiapp_abis if bad_abi not in KNOWN_ABIS]
if to_remove:
warn("The following ABIs listed in the Android <abi> section of tiapp.xml are unknown and will be ignored: %s." % ", ".join(to_remove))
tiapp_abis = [abi for abi in tiapp_abis if abi not in to_remove]
self.abis = tiapp_abis
if not self.abis:
warn("Android <abi> tiapp.xml section does not specify any valid ABIs. Defaulting to '%s'." %
",".join(KNOWN_ABIS))
self.abis = list(KNOWN_ABIS)
self.sdk = AndroidSDK(sdk, self.tool_api_level)
# If the tool-api-level was not explicitly set in the tiapp.xml, but
# <uses-sdk android:targetSdkVersion> *is* set, try to match the target version.
if (not tool_api_level_explicit and temp_tiapp and temp_tiapp.android_manifest
and "manifest" in temp_tiapp.android_manifest):
self.check_target_api_version(temp_tiapp.android_manifest["manifest"])
self.tiappxml = temp_tiapp
json_contents = open(os.path.join(template_dir,'dependency.json')).read()
self.depends_map = simplejson.loads(json_contents)
# favor the ANDROID_SDK_HOME environment variable if used
if os.environ.has_key('ANDROID_SDK_HOME') and os.path.exists(os.environ['ANDROID_SDK_HOME']):
self.home_dir = os.path.join(os.environ['ANDROID_SDK_HOME'], '.titanium')
self.android_home_dir = os.path.join(os.environ['ANDROID_SDK_HOME'], '.android')
# we place some files in the users home
elif platform.system() == "Windows":
self.home_dir = os.path.join(os.environ['USERPROFILE'], '.titanium')
self.android_home_dir = os.path.join(os.environ['USERPROFILE'], '.android')
else:
self.home_dir = os.path.join(os.path.expanduser('~'), '.titanium')
self.android_home_dir = os.path.join(os.path.expanduser('~'), '.android')
if not os.path.exists(self.home_dir):
os.makedirs(self.home_dir)
self.sdcard = os.path.join(self.home_dir,'android2.sdcard')
self.classname = Android.strip_classname(self.name)
if not is_emulator:
self.set_java_commands()
# start in 1.4, you no longer need the build/android directory
# if missing, we'll create it on the fly
if not os.path.exists(self.project_dir) or not os.path.exists(os.path.join(self.project_dir,'AndroidManifest.xml')):
android_creator = Android(name, app_id, self.sdk, None, self.java)
parent_dir = os.path.dirname(self.top_dir)
if os.path.exists(self.top_dir):
android_creator.create(parent_dir, project_dir=self.top_dir, build_time=True)
else:
android_creator.create(parent_dir)
self.force_rebuild = True
sys.stdout.flush()
def check_target_api_version(self, manifest_elements):
pattern = r'android:targetSdkVersion=\"(\d+)\"'
for el in manifest_elements:
if el.nodeName == "uses-sdk":
xml = el.toxml()
matches = re.findall(pattern, xml)
if matches:
new_level = self.sdk.try_best_match_api_level(int(matches[0]))
if new_level != self.tool_api_level:
self.tool_api_level = new_level
break
def set_java_commands(self):
commands = java.find_java_commands()
to_check = ("java", "javac", "keytool", "jarsigner")
found = True
for check in to_check:
if not commands[check]:
found = False
error("Required Java tool '%s' not located." % check)
if not found:
error("One or more required files not found - please check your JAVA_HOME environment variable")
sys.exit(1)
self.jarsigner = commands["jarsigner"]
self.keytool = commands["keytool"]
self.javac = commands["javac"]
self.java = commands["java"]
if not commands["environ_java_home"] and commands["java_home"]:
os.environ["JAVA_HOME"] = commands["java_home"]
def wait_for_home(self, type):
max_wait = 20
attempts = 0
while True:
processes = self.sdk.list_processes(['-%s' % type])
found_home = False
for process in processes:
if process["name"] == "android.process.acore":
found_home = True
break
if found_home:
break
attempts += 1
if attempts == max_wait:
error("Timed out waiting for android.process.acore")
return False
time.sleep(1)
return True
def wait_for_device(self, type):
debug("Waiting for device to be ready ...")
t = time.time()
max_wait = 30
max_zero = 10
attempts = 0
zero_attempts = 0
timed_out = True
no_devices = False
while True:
devices = self.sdk.list_devices()
trace("adb devices returned %s devices/emulators" % len(devices))
if len(devices) > 0:
found = False
for device in devices:
if type == "e" and device.is_emulator() and not device.is_offline(): found = True
elif type == "d" and device.is_device(): found = True
if found:
timed_out = False
break
else: zero_attempts += 1
try: time.sleep(5) # for some reason KeyboardInterrupts get caught here from time to time
except KeyboardInterrupt: pass
attempts += 1
if attempts == max_wait:
break
elif zero_attempts == max_zero:
no_devices = True
break
if timed_out:
if type == "e":
device = "emulator"
extra_message = "you may need to close the emulator and try again"
else:
device = "device"
extra_message = "you may try reconnecting the USB cable"
error("Timed out waiting for %s to be ready, %s" % (device, extra_message))
if no_devices:
sys.exit(1)
return False
debug("Device connected... (waited %d seconds)" % (attempts*5))
duration = time.time() - t
debug("waited %f seconds on emulator to get ready" % duration)
if duration > 1.0:
info("Waiting for the Android Emulator to become available")
return self.wait_for_home(type)
#time.sleep(20) # give it a little more time to get installed
return True
def create_avd(self, avd_id, avd_skin, avd_abi):
# Sanity check the AVD to see if the ABI is available, or
# necessary.
available_avds = avd.get_avds(self.sdk)
multiple_abis = False
for device in available_avds:
if device['id'] == avd_id:
default_abi = device['abis'][0]
multiple_abis = ( len(device['abis']) != 1 )
if avd_abi is None:
avd_abi = default_abi
elif avd_abi not in device['abis']:
warn("ABI %s not supported for AVD ID %s: Using default ABI %s" % (avd_abi, avd_id, default_abi))
avd_abi = default_abi
break
if multiple_abis:
name = "titanium_%s_%s_%s" % (avd_id, avd_skin, avd_abi)
else:
name = "titanium_%s_%s" % (avd_id, avd_skin)
name = name.replace(' ', '_')
if not os.path.exists(self.home_dir):
os.makedirs(self.home_dir)
avd_path = os.path.join(self.android_home_dir, 'avd')
my_avd = os.path.join(avd_path,"%s.avd" % name)
own_sdcard = os.path.join(self.home_dir, '%s.sdcard' % name)
if not os.path.exists(my_avd) or os.path.exists(own_sdcard):
# starting with 1.7.2, when we create a new avd, give it its own
# SDCard as well.
self.sdcard = own_sdcard
if not os.path.exists(self.sdcard):
info("Creating 64M SD card for use in Android emulator")
run.run([self.sdk.get_mksdcard(), '64M', self.sdcard])
if not os.path.exists(my_avd):
if multiple_abis:
info("Creating new Android Virtual Device (%s %s %s)" % (avd_id,avd_skin,avd_abi))
else:
info("Creating new Android Virtual Device (%s %s)" % (avd_id,avd_skin))
inputgen = os.path.join(template_dir,'input.py')
abi_args = []
if multiple_abis:
abi_args = ['-b', avd_abi]
pipe([sys.executable, inputgen], [self.sdk.get_android(), '--verbose', 'create', 'avd', '--name', name, '--target', avd_id, '-s', avd_skin, '--force', '--sdcard', self.sdcard] + abi_args)
inifile = os.path.join(my_avd,'config.ini')
inifilec = open(inifile,'r').read()
inifiledata = open(inifile,'w')
inifiledata.write(inifilec)
# TODO - Document options
for hw_option in android_avd_hw.keys():
inifiledata.write("%s=%s\n" % (hw_option, android_avd_hw[hw_option]))
inifiledata.close()
return name
def run_emulator(self, avd_id, avd_skin, avd_name, avd_abi, add_args):
info("Launching Android emulator...one moment")
debug("From: " + self.sdk.get_emulator())
debug("SDCard: " + self.sdcard)
if avd_name is None:
debug("AVD ID: " + avd_id)
debug("AVD Skin: " + avd_skin)
else:
debug("AVD Name: " + avd_name)
if avd_abi is not None:
debug("AVD ABI: " + avd_abi)
debug("SDK: " + sdk_dir)
# make sure adb is running on windows, else XP can lockup the python
# process when adb runs first time
if platform.system() == "Windows":
run.run([self.sdk.get_adb(), "start-server"], True, ignore_output=True)
devices = self.sdk.list_devices()
for device in devices:
if device.is_emulator() and device.get_port() == 5560:
info("Emulator is running.")
sys.exit()
# this will create an AVD on demand or re-use existing one if already created
if avd_name == None:
avd_name = self.create_avd(avd_id, avd_skin, avd_abi)
# start the emulator
emulator_cmd = [
self.sdk.get_emulator(),
'-avd',
avd_name,
'-port',
'5560',
'-sdcard',
self.get_sdcard_path(),
'-logcat',
'*:d,*,TiAPI:V',
'-no-boot-anim',
'-partition-size',
'128' # in between nexusone and droid
]
if add_args:
emulator_cmd.extend([arg.strip() for arg in add_args if len(arg.strip()) > 0])
debug(' '.join(emulator_cmd))
p = subprocess.Popen(emulator_cmd)
def handler(signum, frame):
debug("signal caught: %d" % signum)
if not p == None:
debug("calling emulator kill on %d" % p.pid)
if platform.system() == "Windows":
os.system("taskkill /F /T /PID %i" % p.pid)
else:
os.kill(p.pid, signal.SIGTERM)
if platform.system() != "Windows":
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGABRT, handler)
signal.signal(signal.SIGTERM, handler)
# give it some time to exit prematurely
time.sleep(1)
rc = p.poll()
if rc != None:
handler(3,None)
sys.exit(rc)
# wait for the emulator to finish
try:
rc = p.wait()
except OSError:
handler(3,None)
info("Android Emulator has exited")
sys.exit(rc)
def check_file_exists(self, path):
output = self.run_adb('shell', 'ls', path)
if output != None:
if output.find("No such file or directory") == -1 \
and output.find("error: device offline") == -1:
return True
return False
def is_app_installed(self):
return self.check_file_exists('/data/app/%s*.apk' % self.app_id)
def get_sdcard_path(self):
# We need to surround the sd card path in quotes for windows to account for spaces in path
if platform.system() == "Windows":
return '"' + self.sdcard + '"'
return self.sdcard
def are_resources_installed(self):
return self.check_file_exists(self.sdcard_resources+'/app.js')
def include_path(self, path, isfile):
if not isfile and os.path.basename(path) in ignoreDirs: return False
elif isfile and os.path.basename(path) in ignoreFiles: return False
return True
def warn_dupe_drawable_folders(self):
tocheck = ('high', 'medium', 'low')
image_parent = os.path.join(self.top_dir, 'Resources', 'android', 'images')
for check in tocheck:
if os.path.exists(os.path.join(image_parent, check)) and os.path.exists(os.path.join(image_parent, 'res-%sdpi' % check[0])):
warn('You have both an android/images/%s folder and an android/images/res-%sdpi folder. Files from both of these folders will end up in res/drawable-%sdpi. If two files are named the same, there is no guarantee which one will be copied last and therefore be the one the application uses. You should use just one of these folders to avoid conflicts.' % (check, check[0], check[0]))
def copy_module_platform_folders(self):
for module in self.modules:
platform_folder = os.path.join(module.path, 'platform', 'android')
if os.path.exists(platform_folder):
copy_all(platform_folder, self.project_dir, True, one_time_msg="Copying platform-specific files for '%s' module" % module.manifest.name)
def copy_commonjs_modules(self):
info('Copying CommonJS modules...')
for module in self.modules:
if module.js is None:
continue
module_name = os.path.basename(module.js)
self.non_orphans.append(module_name)
shutil.copy(module.js, self.assets_resources_dir)
def copy_project_platform_folder(self, ignore_dirs=[], ignore_files=[]):
if not os.path.exists(self.platform_dir):
return
copy_all(self.platform_dir, self.project_dir, True, ignore_dirs, ignore_files, one_time_msg="Copying platform-specific files ...")
def copy_resource_drawables(self):
debug('Processing Android resource drawables')
def make_resource_drawable_filename(orig):
normalized = orig.replace(os.sep, "/")
matches = re.search("/android/images/(high|medium|low|res-[^/]+)/(?P<chopped>.*$)", normalized)
if matches and matches.groupdict() and 'chopped' in matches.groupdict():
chopped = matches.groupdict()['chopped'].lower()
for_hash = chopped
if for_hash.endswith('.9.png'):
for_hash = for_hash[:-6] + '.png'
extension = ""
without_extension = chopped
if re.search("\\..*$", chopped):
if chopped.endswith('.9.png'):
extension = '9.png'
without_extension = chopped[:-6]
else:
extension = chopped.split(".")[-1]
without_extension = chopped[:-(len(extension)+1)]
cleaned_without_extension = re.sub(r'[^a-z0-9_]', '_', without_extension)
cleaned_extension = re.sub(r'[^a-z0-9\._]', '_', extension)
result = cleaned_without_extension[:80] + "_" + hashlib.md5(for_hash).hexdigest()[:10]
if extension:
result += "." + extension
return result
else:
trace("Regexp for resource drawable file %s failed" % orig)
return None
def delete_resource_drawable(orig):
folder = resource_drawable_folder(orig)
res_file = os.path.join(self.res_dir, folder, make_resource_drawable_filename(orig))
if os.path.exists(res_file):
try:
trace("DELETING FILE: %s" % res_file)
os.remove(res_file)
except:
warn('Unable to delete %s: %s. Execution will continue.' % (res_file, sys.exc_info()[0]))
def copy_resource_drawable(orig):
partial_folder = resource_drawable_folder(orig)
if not partial_folder:
trace("Could not copy %s; resource folder not determined" % orig)
return
dest_folder = os.path.join(self.res_dir, partial_folder)
dest_filename = make_resource_drawable_filename(orig)
if dest_filename is None:
return
dest = os.path.join(dest_folder, dest_filename)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
trace("COPYING FILE: %s => %s" % (orig, dest))
shutil.copy(orig, dest)
fileset = []
if self.force_rebuild or self.deploy_type == 'production' or \
(self.js_changed and not self.fastdev):
for root, dirs, files in os.walk(os.path.join(self.top_dir, "Resources")):
remove_ignored_dirs(dirs)
for f in files:
if f in ignoreFiles:
continue
path = os.path.join(root, f)
if is_resource_drawable(path) and f != 'default.png':
fileset.append(path)
else:
if self.project_deltas:
for delta in self.project_deltas:
path = delta.get_path()
if is_resource_drawable(path):
if delta.get_status() == Delta.DELETED:
delete_resource_drawable(path)
else:
fileset.append(path)
if len(fileset) == 0:
return False
for f in fileset:
copy_resource_drawable(f)
return True
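# Standalone sketch (names assumed) mirroring make_resource_drawable_filename
# above: lowercase, sanitize to [a-z0-9_], cap at 80 chars, then append a
# 10-char md5 prefix so distinct originals cannot collide after cleaning.
def _demo_drawable_filename():
    import hashlib, re
    chopped = 'My Icon@2x.PNG'.lower()               # 'my icon@2x.png'
    ext = chopped.split('.')[-1]                     # 'png'
    base = chopped[:-(len(ext) + 1)]                 # 'my icon@2x'
    cleaned = re.sub(r'[^a-z0-9_]', '_', base)       # 'my_icon_2x'
    return cleaned[:80] + '_' + hashlib.md5(chopped).hexdigest()[:10] + '.' + ext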
def copy_project_resources(self):
info("Copying project resources..")
def validate_filenames(topdir):
for root, dirs, files in os.walk(topdir):
remove_ignored_dirs(dirs)
# prune in place; removing while iterating skips entries
dirs[:] = [d for d in dirs if d not in ("iphone", "mobileweb")]
for filename in files:
if filename.startswith("_"):
error("%s is an invalid filename. Android will not package assets whose filenames start with underscores. Fix and rebuild." % os.path.join(root, filename))
sys.exit(1)
resources_dir = os.path.join(self.top_dir, 'Resources')
validate_filenames(resources_dir)
android_resources_dir = os.path.join(resources_dir, 'android')
self.project_deltafy = Deltafy(resources_dir, include_callback=self.include_path)
self.project_deltas = self.project_deltafy.scan()
self.js_changed = False
tiapp_delta = self.project_deltafy.scan_single_file(self.project_tiappxml)
self.tiapp_changed = tiapp_delta is not None
full_copy = not os.path.exists(self.assets_resources_dir)
if self.tiapp_changed or self.force_rebuild or full_copy:
info("Detected change in tiapp.xml, or assets deleted. Forcing full re-build...")
# force a clean scan/copy when the tiapp.xml has changed
self.project_deltafy.clear_state()
self.project_deltas = self.project_deltafy.scan()
# rescan tiapp.xml so it doesn't show up as created next time around
self.project_deltafy.scan_single_file(self.project_tiappxml)
if self.tiapp_changed:
for root, dirs, files in os.walk(self.project_gen_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
def strip_slash(s):
if s[0:1]=='/' or s[0:1]=='\\': return s[1:]
return s
def make_relative(path, relative_to, prefix=None):
relative_path = strip_slash(path[len(relative_to):])
if prefix is not None:
return os.path.join(prefix, relative_path)
return relative_path
for delta in self.project_deltas:
path = delta.get_path()
if re.search("android/images/(high|medium|low|res-[^/]+)/", path.replace(os.sep, "/")):
continue # density images are handled later
if delta.get_status() == Delta.DELETED and path.startswith(android_resources_dir):
shared_path = path.replace(android_resources_dir, resources_dir, 1)
if os.path.exists(shared_path):
dest = make_relative(shared_path, resources_dir, self.assets_resources_dir)
trace("COPYING FILE: %s => %s (platform-specific file was removed)" % (shared_path, dest))
shutil.copy(shared_path, dest)
if delta.get_status() != Delta.DELETED:
if path.startswith(android_resources_dir):
dest = make_relative(path, android_resources_dir, self.assets_resources_dir)
else:
# don't copy it if there is an android-specific file
if os.path.exists(path.replace(resources_dir, android_resources_dir, 1)):
continue
dest = make_relative(path, resources_dir, self.assets_resources_dir)
if path.startswith(os.path.join(resources_dir, "iphone")) or path.startswith(os.path.join(resources_dir, "mobileweb")) or path.startswith(os.path.join(resources_dir, "blackberry")):
continue
parent = os.path.dirname(dest)
if not os.path.exists(parent):
os.makedirs(parent)
trace("COPYING %s FILE: %s => %s" % (delta.get_status_str(), path, dest))
shutil.copy(path, dest)
if (path.startswith(resources_dir) or path.startswith(android_resources_dir)) and path.endswith(".js"):
self.js_changed = True
# copy to the sdcard in development mode
if self.sdcard_copy and self.app_installed and (self.deploy_type == 'development' or self.deploy_type == 'test'):
if path.startswith(android_resources_dir):
relative_path = make_relative(delta.get_path(), android_resources_dir)
else:
relative_path = make_relative(delta.get_path(), resources_dir)
relative_path = relative_path.replace("\\", "/")
self.run_adb('push', delta.get_path(), "%s/%s" % (self.sdcard_resources, relative_path))
if os.environ.has_key('LIVEVIEW'):
debug("LiveView enabled")
appjs = os.path.join(self.assets_resources_dir, 'app.js')
_appjs = os.path.join(self.assets_resources_dir, '_app.js')
liveviewjs = os.path.join(tempfile.gettempdir(), 'liveview.js')
self.non_orphans.append('_app.js')
if not os.path.exists(appjs):
debug('app.js not found: %s' % appjs)
if not os.path.exists(liveviewjs):
debug('liveviewjs.js not found: %s' % liveviewjs)
if os.path.exists(appjs) and os.path.exists(liveviewjs):
trace("COPYING %s => %s" % (appjs, _appjs))
shutil.copy(appjs, _appjs)
trace("COPYING %s => %s" % (liveviewjs, appjs))
shutil.copy(liveviewjs, appjs)
else:
debug('LiveView not enabled')
index_json_path = os.path.join(self.assets_dir, "index.json")
if len(self.project_deltas) > 0 or not os.path.exists(index_json_path):
requireIndex.generateJSON(self.assets_dir, index_json_path)
def check_permissions_mapping(self, key, permissions_mapping, permissions_list):
    # Append each permission mapped to this module/method exactly once.
    for perm in permissions_mapping.get(key, []):
        if perm not in permissions_list:
            permissions_list.append(perm)
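# Standalone sketch (assumed values) of the merge behaviour: permissions
# mapped to a used module land in the required list without duplicates.
def _demo_permission_merge():
    required = ['INTERNET']
    mapping = {'geolocation': ['ACCESS_FINE_LOCATION', 'INTERNET']}
    for perm in mapping.get('geolocation', []):
        if perm not in required:
            required.append(perm)
    return required  # ['INTERNET', 'ACCESS_FINE_LOCATION']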
def generate_android_manifest(self,compiler):
self.generate_localizations()
self.remove_duplicate_res()
# NOTE: these are built-in permissions we need -- we probably need to refine when these are needed too
permissions_required = ['INTERNET','ACCESS_WIFI_STATE','ACCESS_NETWORK_STATE', 'WRITE_EXTERNAL_STORAGE']
GEO_PERMISSION = [ 'ACCESS_COARSE_LOCATION', 'ACCESS_FINE_LOCATION']
CONTACTS_READ_PERMISSION = ['READ_CONTACTS']
CONTACTS_PERMISSION = ['READ_CONTACTS', 'WRITE_CONTACTS']
CALENDAR_PERMISSION = ['READ_CALENDAR', 'WRITE_CALENDAR']
VIBRATE_PERMISSION = ['VIBRATE']
CAMERA_PERMISSION = ['CAMERA']
WALLPAPER_PERMISSION = ['SET_WALLPAPER']
# Enable mock location if in development or test mode.
if self.deploy_type == 'development' or self.deploy_type == 'test':
GEO_PERMISSION.append('ACCESS_MOCK_LOCATION')
# this is our module to permission(s) trigger - for each module on the left, require the permission(s) on the right
permissions_module_mapping = {
# GEO
'geolocation' : GEO_PERMISSION
}
# this is our module method to permission(s) trigger - for each method on the left, require the permission(s) on the right
permissions_method_mapping = {
# MAP
'Map.createView' : GEO_PERMISSION,
# MEDIA
'Media.vibrate' : VIBRATE_PERMISSION,
'Media.showCamera' : CAMERA_PERMISSION,
# CONTACTS
'Contacts.createPerson' : CONTACTS_PERMISSION,
'Contacts.removePerson' : CONTACTS_PERMISSION,
'Contacts.getAllContacts' : CONTACTS_READ_PERMISSION,
'Contacts.showContactPicker' : CONTACTS_READ_PERMISSION,
'Contacts.showContacts' : CONTACTS_READ_PERMISSION,
'Contacts.getPersonByID' : CONTACTS_READ_PERMISSION,
'Contacts.getPeopleWithName' : CONTACTS_READ_PERMISSION,
'Contacts.getAllPeople' : CONTACTS_READ_PERMISSION,
'Contacts.getAllGroups' : CONTACTS_READ_PERMISSION,
'Contacts.getGroupByID' : CONTACTS_READ_PERMISSION,
# Old CALENDAR
'Android.Calendar.getAllAlerts' : CALENDAR_PERMISSION,
'Android.Calendar.getAllCalendars' : CALENDAR_PERMISSION,
'Android.Calendar.getCalendarById' : CALENDAR_PERMISSION,
'Android.Calendar.getSelectableCalendars' : CALENDAR_PERMISSION,
# CALENDAR
'Calendar.getAllAlerts' : CALENDAR_PERMISSION,
'Calendar.getAllCalendars' : CALENDAR_PERMISSION,
'Calendar.getCalendarById' : CALENDAR_PERMISSION,
'Calendar.getSelectableCalendars' : CALENDAR_PERMISSION,
# WALLPAPER
'Media.Android.setSystemWallpaper' : WALLPAPER_PERMISSION,
}
VIDEO_ACTIVITY = """<activity
android:name="ti.modules.titanium.media.TiVideoActivity"
android:configChanges="keyboardHidden|orientation"
android:theme="@android:style/Theme.NoTitleBar.Fullscreen"
android:launchMode="singleTask"
/>"""
MAP_ACTIVITY = """<activity
android:name="ti.modules.titanium.map.TiMapActivity"
android:configChanges="keyboardHidden|orientation"
android:launchMode="singleTask"
/>
<uses-library android:name="com.google.android.maps" />"""
CAMERA_ACTIVITY = """<activity
android:name="ti.modules.titanium.media.TiCameraActivity"
android:configChanges="keyboardHidden|orientation"
android:theme="@android:style/Theme.Translucent.NoTitleBar.Fullscreen"
/>"""
activity_mapping = {
# MEDIA
'Media.createVideoPlayer' : VIDEO_ACTIVITY,
'Media.showCamera' : CAMERA_ACTIVITY,
# MAPS
'Map.createView' : MAP_ACTIVITY,
}
# this is a map of our APIs to ones that require Google APIs to be available on the device
google_apis = {
"Map.createView" : True
}
activities = []
# figure out which permissions we need based on the used module
for mod in compiler.modules:
self.check_permissions_mapping(mod, permissions_module_mapping, permissions_required)
# figure out which permissions we need based on the used module methods
for mn in compiler.module_methods:
self.check_permissions_mapping(mn, permissions_method_mapping, permissions_required)
if mn in activity_mapping:
    mapping = activity_mapping[mn]
    if google_apis.get(mn) and not self.google_apis_supported:
        warn("Google APIs detected but a device has been selected that doesn't support them. The API call to Titanium.%s will fail on the selected device/emulator" % mn)
        continue
    if mapping not in activities:
        activities.append(mapping)
# Javascript-based activities defined in tiapp.xml
if self.tiapp and self.tiapp.android and 'activities' in self.tiapp.android:
tiapp_activities = self.tiapp.android['activities']
for key in tiapp_activities:
activity = tiapp_activities[key]
if not 'url' in activity:
continue
activity_name = self.app_id + '.' + activity['classname']
activity_str = '<activity \n\t\t\tandroid:name="%s"' % activity_name
for subkey in activity:
if subkey not in ('nodes', 'name', 'url', 'options', 'classname', 'android:name'):
activity_str += '\n\t\t\t%s="%s"' % (subkey, activity[subkey])
if 'android:config' not in activity:
activity_str += '\n\t\t\tandroid:configChanges="keyboardHidden|orientation"'
if 'nodes' in activity:
activity_str += '>'
for node in activity['nodes']:
activity_str += '\n\t\t\t\t' + node.toxml()
activities.append(activity_str + '\n\t\t</activity>\n')
else:
activities.append(activity_str + '\n\t\t/>\n')
activities = set(activities)
services = []
# Javascript-based services defined in tiapp.xml
if self.tiapp and self.tiapp.android and 'services' in self.tiapp.android:
tiapp_services = self.tiapp.android['services']
for key in tiapp_services:
service = tiapp_services[key]
if not 'url' in service:
continue
service_name = self.app_id + '.' + service['classname']
service_str = '<service \n\t\t\tandroid:name="%s"' % service_name
for subkey in service:
if subkey not in ('nodes', 'service_type', 'type', 'name', 'url', 'options', 'classname', 'android:name'):
service_str += '\n\t\t\t%s="%s"' % (subkey, service[subkey])
if 'nodes' in service:
service_str += '>'
for node in service['nodes']:
service_str += '\n\t\t\t\t' + node.toxml()
services.append(service_str + '\n\t\t</service>\n')
else:
services.append(service_str + '\n\t\t/>\n')
self.use_maps = False
self.res_changed = False
icon_name = self.tiapp.properties['icon']
icon_path = os.path.join(self.assets_resources_dir, icon_name)
icon_ext = os.path.splitext(icon_path)[1]
res_drawable_dest = os.path.join(self.project_dir, 'res', 'drawable')
if not os.path.exists(res_drawable_dest):
os.makedirs(res_drawable_dest)
default_icon = os.path.join(self.support_resources_dir, 'default.png')
dest_icon = os.path.join(res_drawable_dest, 'appicon%s' % icon_ext)
if Deltafy.needs_update(icon_path, dest_icon):
self.res_changed = True
debug("copying app icon: %s" % icon_path)
shutil.copy(icon_path, dest_icon)
elif Deltafy.needs_update(default_icon, dest_icon):
self.res_changed = True
debug("copying default app icon")
shutil.copy(default_icon, dest_icon)
# make our Titanium theme for our icon
res_values_dir = os.path.join(self.project_dir, 'res','values')
if not os.path.exists(res_values_dir):
os.makedirs(res_values_dir)
theme_xml = os.path.join(res_values_dir,'theme.xml')
if not os.path.exists(theme_xml):
self.res_changed = True
debug('generating theme.xml')
theme_file = open(theme_xml, 'w')
theme_flags = "Theme"
# We need to treat the default values for fullscreen and
# navbar-hidden the same as android.py does -- false for both.
theme_fullscreen = False
theme_navbarhidden = False
if (self.tiapp.properties.get("fullscreen") == "true" or
self.tiapp.properties.get("statusbar-hidden") == "true"):
theme_fullscreen = True
elif self.tiapp.properties.get("navbar-hidden") == "true":
theme_navbarhidden = True
if theme_fullscreen:
theme_flags += ".NoTitleBar.Fullscreen"
elif theme_navbarhidden:
theme_flags += ".NoTitleBar"
# Wait, one exception. If you want the notification area (very
# top of screen) hidden, but want the title bar in the app,
# there's no theme for that. So we have to use the default theme (no flags)
# and when the application code starts running, the adjustments are then made.
# Only do this when the properties are explicitly set, so as to avoid changing
# old default behavior.
if theme_flags.endswith('.Fullscreen') and \
self.tiapp.properties.get("navbar-hidden") == 'false' and \
('fullscreen' in self.tiapp.explicit_properties or \
'statusbar-hidden' in self.tiapp.explicit_properties) and \
'navbar-hidden' in self.tiapp.explicit_properties:
theme_flags = 'Theme'
TITANIUM_THEME="""<?xml version="1.0" encoding="utf-8"?>
<resources>
<style name="Theme.Titanium" parent="android:%s">
<item name="android:windowBackground">@drawable/background</item>
</style>
</resources>
""" % theme_flags
theme_file.write(TITANIUM_THEME)
theme_file.close()
# create our background image which acts as splash screen during load
resources_dir = os.path.join(self.top_dir, 'Resources')
android_images_dir = os.path.join(resources_dir, 'android', 'images')
# look for density-specific default.png's first
if os.path.exists(android_images_dir):
pattern = r'/android/images/(high|medium|low|res-[^/]+)/default.png'
for root, dirs, files in os.walk(android_images_dir):
remove_ignored_dirs(dirs)
for f in files:
if f in ignoreFiles:
continue
path = os.path.join(root, f)
if re.search(pattern, path.replace(os.sep, "/")):
res_folder = resource_drawable_folder(path)
debug('found %s splash screen at %s' % (res_folder, path))
dest_path = os.path.join(self.res_dir, res_folder)
dest_file = os.path.join(dest_path, 'background.png')
if not os.path.exists(dest_path):
os.makedirs(dest_path)
if Deltafy.needs_update(path, dest_file):
self.res_changed = True
debug('copying %s splash screen to %s' % (path, dest_file))
shutil.copy(path, dest_file)
default_png = os.path.join(self.assets_resources_dir, 'default.png')
support_default_png = os.path.join(self.support_resources_dir, 'default.png')
background_png = os.path.join(self.project_dir, 'res','drawable','background.png')
if os.path.exists(default_png) and Deltafy.needs_update(default_png, background_png):
self.res_changed = True
debug("found splash screen at %s" % os.path.abspath(default_png))
shutil.copy(default_png, background_png)
elif Deltafy.needs_update(support_default_png, background_png):
self.res_changed = True
debug("copying default splash screen")
shutil.copy(support_default_png, background_png)
android_manifest = os.path.join(self.project_dir, 'AndroidManifest.xml')
android_manifest_to_read = android_manifest
# NOTE: allow the user to use their own custom AndroidManifest if they put a file named
# AndroidManifest.xml in platform/android, in which case all bets are off
is_custom = False
# Catch people who may have it in project root (un-released 1.4.x android_native_refactor branch users)
if os.path.exists(os.path.join(self.top_dir, 'AndroidManifest.xml')):
warn('AndroidManifest.xml file in the project root is ignored. Move it to platform/android if you want it to be your custom manifest.')
android_custom_manifest = os.path.join(self.project_dir, 'AndroidManifest.custom.xml')
if not os.path.exists(android_custom_manifest):
android_custom_manifest = os.path.join(self.platform_dir, 'AndroidManifest.xml')
else:
warn('Use of AndroidManifest.custom.xml is deprecated. Please put your custom manifest as "AndroidManifest.xml" in the "platform/android" directory if you do not need to compile for versions < 1.5')
if os.path.exists(android_custom_manifest):
android_manifest_to_read = android_custom_manifest
is_custom = True
info("Detected custom ApplicationManifest.xml -- no Titanium version migration supported")
default_manifest_contents = self.android.render_android_manifest()
if self.sdk.api_level >= HONEYCOMB_MR2_LEVEL:
# Need to add "screenSize" in our default "configChanges" attribute on
# <activity> elements, else changes in orientation will cause the app
# to restart. cf. TIMOB-10863.
default_manifest_contents = default_manifest_contents.replace('|orientation"', '|orientation|screenSize"')
debug("Added 'screenSize' to <activity android:configChanges> because targeted api level %s is >= %s" % (self.sdk.api_level, HONEYCOMB_MR2_LEVEL))
custom_manifest_contents = None
if is_custom:
custom_manifest_contents = open(android_manifest_to_read,'r').read()
manifest_xml = ''
def get_manifest_xml(tiapp, template_obj=None):
xml = ''
if 'manifest' in tiapp.android_manifest:
for manifest_el in tiapp.android_manifest['manifest']:
# since we already track permissions in another way, go ahead and use that
if manifest_el.nodeName == 'uses-permission' and manifest_el.hasAttribute('android:name'):
if manifest_el.getAttribute('android:name').split('.')[-1] not in permissions_required:
perm_val = manifest_el.getAttribute('android:name')
if template_obj is not None and "${" in perm_val:
perm_val = render_template_with_tiapp(perm_val, template_obj)
permissions_required.append(perm_val)
elif manifest_el.nodeName not in ('supports-screens', 'uses-sdk'):
this_xml = manifest_el.toprettyxml()
if template_obj is not None and "${" in this_xml:
this_xml = render_template_with_tiapp(this_xml, template_obj)
xml += this_xml
return xml
application_xml = ''
def get_application_xml(tiapp, template_obj=None):
xml = ''
if 'application' in tiapp.android_manifest:
for app_el in tiapp.android_manifest['application']:
this_xml = app_el.toxml()
if template_obj is not None and "${" in this_xml:
this_xml = render_template_with_tiapp(this_xml, template_obj)
xml += this_xml
return xml
# add manifest / application entries from tiapp.xml
manifest_xml += get_manifest_xml(self.tiapp)
application_xml += get_application_xml(self.tiapp)
# add manifest / application entries from modules
for module in self.modules:
if module.xml == None: continue
manifest_xml += get_manifest_xml(module.xml, self.tiapp)
application_xml += get_application_xml(module.xml, self.tiapp)
# build the permissions XML based on the permissions detected
permissions_required = set(permissions_required)
permissions_required_xml = ""
for p in permissions_required:
if '.' not in p:
permissions_required_xml+="<uses-permission android:name=\"android.permission.%s\"/>\n\t" % p
else:
permissions_required_xml+="<uses-permission android:name=\"%s\"/>\n\t" % p
def fill_manifest(manifest_source):
ti_activities = '<!-- TI_ACTIVITIES -->'
ti_permissions = '<!-- TI_PERMISSIONS -->'
ti_manifest = '<!-- TI_MANIFEST -->'
ti_application = '<!-- TI_APPLICATION -->'
ti_services = '<!-- TI_SERVICES -->'
manifest_source = manifest_source.replace(ti_activities,"\n\n\t\t".join(activities))
manifest_source = manifest_source.replace(ti_services,"\n\n\t\t".join(services))
manifest_source = manifest_source.replace(ti_permissions,permissions_required_xml)
if len(manifest_xml) > 0:
manifest_source = manifest_source.replace(ti_manifest, manifest_xml)
if len(application_xml) > 0:
manifest_source = manifest_source.replace(ti_application, application_xml)
return manifest_source
default_manifest_contents = fill_manifest(default_manifest_contents)
# if a custom uses-sdk or supports-screens has been specified via tiapp.xml
# <android><manifest>..., we need to replace the ones in the generated
# default manifest
supports_screens_node = None
uses_sdk_node = None
if 'manifest' in self.tiapp.android_manifest:
for node in self.tiapp.android_manifest['manifest']:
if node.nodeName == 'uses-sdk':
uses_sdk_node = node
elif node.nodeName == 'supports-screens':
supports_screens_node = node
if supports_screens_node or uses_sdk_node or ('manifest-attributes' in self.tiapp.android_manifest and self.tiapp.android_manifest['manifest-attributes'].length) or ('application-attributes' in self.tiapp.android_manifest and self.tiapp.android_manifest['application-attributes'].length):
dom = parseString(default_manifest_contents)
def replace_node(olddom, newnode):
nodes = olddom.getElementsByTagName(newnode.nodeName)
retval = False
if nodes:
olddom.documentElement.replaceChild(newnode, nodes[0])
retval = True
return retval
if supports_screens_node:
if not replace_node(dom, supports_screens_node):
dom.documentElement.insertBefore(supports_screens_node, dom.documentElement.firstChild.nextSibling)
if uses_sdk_node:
replace_node(dom, uses_sdk_node)
def set_attrs(element, new_attr_set):
for k in new_attr_set.keys():
if element.hasAttribute(k):
element.removeAttribute(k)
element.setAttribute(k, new_attr_set.get(k).value)
if 'manifest-attributes' in self.tiapp.android_manifest and self.tiapp.android_manifest['manifest-attributes'].length:
set_attrs(dom.documentElement, self.tiapp.android_manifest['manifest-attributes'])
if 'application-attributes' in self.tiapp.android_manifest and self.tiapp.android_manifest['application-attributes'].length:
set_attrs(dom.getElementsByTagName('application')[0], self.tiapp.android_manifest['application-attributes'])
default_manifest_contents = dom.toxml()
if application_xml:
# If the tiapp.xml <manifest><application> section was not empty, it could be
# that user put in <activity> entries that duplicate our own,
# such as if they want a custom theme on TiActivity. So we should delete any dupes.
dom = parseString(default_manifest_contents)
package_name = dom.documentElement.getAttribute('package')
manifest_activities = dom.getElementsByTagName('activity')
activity_names = []
nodes_to_delete = []
for manifest_activity in manifest_activities:
if manifest_activity.hasAttribute('android:name'):
activity_name = manifest_activity.getAttribute('android:name')
if activity_name.startswith('.'):
activity_name = package_name + activity_name
if activity_name in activity_names:
nodes_to_delete.append(manifest_activity)
else:
activity_names.append(activity_name)
if nodes_to_delete:
for node_to_delete in nodes_to_delete:
node_to_delete.parentNode.removeChild(node_to_delete)
default_manifest_contents = dom.toxml()
if custom_manifest_contents:
custom_manifest_contents = fill_manifest(custom_manifest_contents)
new_manifest_contents = None
android_manifest_gen = android_manifest + '.gen'
if custom_manifest_contents:
new_manifest_contents = custom_manifest_contents
# Write the would-be default as well so user can see
# some of the auto-gen'd insides of it if they need/want.
amf = open(android_manifest + '.gen', 'w')
amf.write(default_manifest_contents)
amf.close()
else:
new_manifest_contents = default_manifest_contents
if os.path.exists(android_manifest_gen):
os.remove(android_manifest_gen)
manifest_changed = False
old_contents = None
if os.path.exists(android_manifest):
old_contents = open(android_manifest, 'r').read()
if new_manifest_contents != old_contents:
trace("Writing out AndroidManifest.xml")
amf = open(android_manifest,'w')
amf.write(new_manifest_contents)
amf.close()
manifest_changed = True
if self.res_changed or manifest_changed:
res_dir = os.path.join(self.project_dir, 'res')
output = run.run([self.aapt, 'package', '-m',
'-J', self.project_gen_dir,
'-M', android_manifest,
'-S', res_dir,
'-I', self.android_jar], warning_regex=r'skipping')
r_file = os.path.join(self.project_gen_dir, self.app_id.replace('.', os.sep), 'R.java')
if not os.path.exists(r_file) or (self.res_changed and output == None):
error("Error generating R.java from manifest")
sys.exit(1)
return manifest_changed
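# Standalone sketch (placeholder names taken from fill_manifest above, the
# rest assumed): the TI_* comment markers in the manifest template are simply
# string-replaced with generated XML.
def _demo_fill_manifest_placeholders():
    template = '<manifest><!-- TI_PERMISSIONS --><application/></manifest>'
    perms = '<uses-permission android:name="android.permission.INTERNET"/>'
    return template.replace('<!-- TI_PERMISSIONS -->', perms)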
def generate_stylesheet(self):
update_stylesheet = False
resources_dir = os.path.join(self.top_dir, 'Resources')
project_gen_pkg_dir = os.path.join(self.project_gen_dir, self.app_id.replace('.', os.sep))
app_stylesheet = os.path.join(project_gen_pkg_dir, 'ApplicationStylesheet.java')
if not os.path.exists(app_stylesheet):
update_stylesheet = True
else:
for root, dirs, files in os.walk(resources_dir, True, None, True):
remove_ignored_dirs(dirs)
for f in files:
if f in ignoreFiles:
continue
if f.endswith(".jss"):
absolute_path = os.path.join(root, f)
if Deltafy.needs_update(absolute_path, app_stylesheet):
update_stylesheet = True
break
if not update_stylesheet:
return
cssc = csscompiler.CSSCompiler(resources_dir, 'android', self.app_id)
if not os.path.exists(project_gen_pkg_dir):
os.makedirs(project_gen_pkg_dir)
debug("app stylesheet => %s" % app_stylesheet)
asf = codecs.open(app_stylesheet, 'w', 'utf-8')
asf.write(cssc.code)
asf.close()
def generate_localizations(self):
# compile localization files
localecompiler.LocaleCompiler(self.name,self.top_dir,'android',sys.argv[1]).compile()
# fix unescaped single quotes and double quotes
# remove duplicate strings since we merge strings.xml from /i18n/ and /platform/android/res/values (TIMOB-12663)
offending_pattern = '[^\\\\][\'"]'
for root, dirs, files in os.walk(self.res_dir):
remove_ignored_dirs(dirs)
for filename in files:
if filename in ignoreFiles or not filename.endswith('.xml'):
continue
string_name_list = [] #keeps track of the string names
full_path = os.path.join(root, filename)
f = codecs.open(full_path, 'r', 'utf-8')
contents = f.read()
f.close()
if not re.search(r"<string ", contents):
continue
doc = parseString(contents.encode("utf-8"))
string_nodes = doc.getElementsByTagName('string')
resources_node = doc.getElementsByTagName('resources')[0]
if len(string_nodes) == 0:
continue
made_change = False
for string_node in string_nodes:
name = string_node.getAttribute('name')
# Remove the string node with the duplicate names
if name in string_name_list:
resources_node.removeChild(string_node)
made_change = True
debug('Removed duplicate string [%s] from %s' %(name, full_path))
else:
string_name_list.append(name)
if not string_node.hasChildNodes():
continue
string_child = string_node.firstChild
if string_child.nodeType == string_child.CDATA_SECTION_NODE or string_child.nodeType == string_child.TEXT_NODE:
string_value = string_child.nodeValue
if not re.search(offending_pattern, string_value):
continue
offenders = re.findall(offending_pattern, string_value)
if offenders:
for offender in offenders:
string_value = string_value.replace(offender, offender[0] + "\\" + offender[-1:])
made_change = True
string_child.nodeValue = string_value
if made_change:
new_contents = doc.toxml()
f = codecs.open(full_path, 'w', 'utf-8')
f.write(new_contents)
f.close()
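# Standalone illustration of the quote-escaping loop above (demo only, not
# part of the build): the pattern matches a quote not preceded by a backslash.
#   >>> import re
#   >>> value = "it's five o'clock"
#   >>> for offender in re.findall('[^\\\\][\'"]', value):
#   ...     value = value.replace(offender, offender[0] + "\\" + offender[-1:])
#   >>> value
#   "it\\'s five o\\'clock"
# Note: a quote at the very start of the string is not matched, and
# already-escaped quotes are left alone.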
def remove_duplicate_res(self):
for root, dirs, files in os.walk(self.res_dir):
remove_ignored_dirs(dirs)
for filename in files:
if not (filename in resourceFiles):
continue
full_path = os.path.join(root, filename)
node_names_to_check = ["string", "bool", "color", "dimen", "item", "integer",
"array", "integer-array", "string-array", "declare-styleable", "attr", "style"]
# "strings.xml" is checked in generate_localizations()
if filename != "strings.xml":
remove_duplicate_nodes_in_res_file(full_path, node_names_to_check)
def recurse(self, paths, file_glob=None):
if paths == None:
yield None
return
if not isinstance(paths, list): paths = [paths]
for path in paths:
for root, dirs, files in os.walk(path):
remove_ignored_dirs(dirs)
for filename in files:
if filename in ignoreFiles:
continue
if file_glob != None:
if not fnmatch.fnmatch(filename, file_glob): continue
yield os.path.join(root, filename)
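# file_glob uses shell-style matching via fnmatch, e.g. (illustration only):
#   >>> import fnmatch
#   >>> fnmatch.fnmatch('IRemoteService.aidl', '*.aidl')
#   True
# A paths value of None yields a single None (see the guard above), so
# callers that pass a real directory always get absolute file paths back.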
def generate_aidl(self):
# support for android remote interfaces in platform/android/src
framework_aidl = self.sdk.platform_path('framework.aidl')
aidl_args = [self.sdk.get_aidl(), '-p' + framework_aidl, '-I' + self.project_src_dir, '-o' + self.project_gen_dir]
for aidl_file in self.recurse(self.project_src_dir, '*.aidl'):
run.run(aidl_args + [aidl_file])
def build_generated_classes(self):
src_list = []
self.module_jars = []
classpath = os.pathsep.join([self.android_jar, os.pathsep.join(self.android_jars)])
project_module_dir = os.path.join(self.top_dir,'modules','android')
for module in self.modules:
if module.jar == None: continue
self.module_jars.append(module.jar)
classpath = os.pathsep.join([classpath, module.jar])
module_lib = module.get_resource('lib')
for jar in glob.glob(os.path.join(module_lib, '*.jar')):
self.module_jars.append(jar)
classpath = os.pathsep.join([classpath, jar])
if len(self.module_jars) > 0:
# kroll-apt.jar is needed for modules
classpath = os.pathsep.join([classpath, self.kroll_apt_jar])
classpath = os.pathsep.join([classpath, os.path.join(self.support_dir, 'lib', 'titanium-verify.jar')])
if self.deploy_type != 'production':
classpath = os.pathsep.join([classpath, os.path.join(self.support_dir, 'lib', 'titanium-debug.jar')])
classpath = os.pathsep.join([classpath, os.path.join(self.support_dir, 'lib', 'titanium-profiler.jar')])
for java_file in self.recurse([self.project_src_dir, self.project_gen_dir], '*.java'):
if self.project_src_dir in java_file:
relative_path = java_file[len(self.project_src_dir)+1:]
else:
relative_path = java_file[len(self.project_gen_dir)+1:]
class_file = os.path.join(self.classes_dir, relative_path.replace('.java', '.class'))
if Deltafy.needs_update(java_file, class_file) > 0:
# the file list file still needs each file escaped apparently
debug("adding %s to javac build list" % java_file)
src_list.append('"%s"' % java_file.replace("\\", "\\\\"))
if len(src_list) == 0:
# No sources are newer than their classfile counterparts; we can skip javac / dex
return False
debug("Building Java Sources: " + " ".join(src_list))
javac_command = [self.javac, '-encoding', 'utf8',
'-classpath', classpath, '-d', self.classes_dir, '-proc:none',
'-sourcepath', self.project_src_dir,
'-sourcepath', self.project_gen_dir, '-target', '1.6', '-source', '1.6']
(src_list_osfile, src_list_filename) = tempfile.mkstemp()
src_list_file = os.fdopen(src_list_osfile, 'w')
src_list_file.write("\n".join(src_list))
src_list_file.close()
javac_command.append('@' + src_list_filename)
(out, err, javac_process) = run.run(javac_command, ignore_error=True, return_error=True, return_process=True)
os.remove(src_list_filename)
if javac_process.returncode != 0:
error("Error(s) compiling generated Java code")
error(str(err))
sys.exit(1)
return True
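# The '@' + src_list_filename argument above uses javac's argfile mechanism:
# javac reads the remaining arguments from the named file. Hedged example of
# the file's contents as written by this method (paths are illustrative):
#   "C:\\workspace\\app\\gen\\AppInfo.java"
#   "C:\\workspace\\app\\src\\CustomActivity.java"
# Each path is quoted and backslash-escaped because argfile tokens are
# split on whitespace.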
def create_unsigned_apk(self, resources_zip_file, webview_js_files=None):
unsigned_apk = os.path.join(self.project_dir, 'bin', 'app-unsigned.apk')
self.apk_updated = False
apk_modified = None
if os.path.exists(unsigned_apk):
apk_modified = Deltafy.get_modified_datetime(unsigned_apk)
debug("creating unsigned apk: " + unsigned_apk)
# copy existing resources into the APK
apk_zip = zipfile.ZipFile(unsigned_apk, 'w', zipfile.ZIP_DEFLATED)
def skip_jar_path(path):
ext = os.path.splitext(path)[1]
if path.endswith('/'): return True
if path.startswith('META-INF/'): return True
if path.split('/')[-1].startswith('.'): return True
if ext == '.class': return True
if 'org/appcelerator/titanium/bindings' in path and ext == '.json': return True
if 'tiapp' in path and ext == '.xml': return True
def skip_js_file(path):
return self.compile_js is True and \
os.path.splitext(path)[1] == '.js' and \
os.path.join(self.project_dir, "bin", path) not in webview_js_files
def compression_type(path):
ext = os.path.splitext(path)[1]
if ext in uncompressed_types:
return zipfile.ZIP_STORED
return zipfile.ZIP_DEFLATED
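# (Stored vs. deflated matters on Android: some asset types, e.g. audio,
# are read by mmap-ing the uncompressed APK entry, so they must be added
# with ZIP_STORED. 'uncompressed_types' is assumed to be the extension
# list defined earlier in this script.)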
def zipinfo(path):
info = zipfile.ZipInfo(path)
info.compress_type = compression_type(path)
return info
def is_modified(path):
return apk_modified is None or Deltafy.needs_update_timestamp(path, apk_modified)
def zip_contains(zip, entry):
try:
zip.getinfo(entry)
except KeyError:
return False
return True
if is_modified(resources_zip_file):
self.apk_updated = True
resources_zip = zipfile.ZipFile(resources_zip_file)
for path in resources_zip.namelist():
if skip_jar_path(path) or skip_js_file(path): continue
debug("from resource zip => " + path)
apk_zip.writestr(zipinfo(path), resources_zip.read(path))
resources_zip.close()
# add classes.dex
if is_modified(self.classes_dex) or not zip_contains(apk_zip, 'classes.dex'):
apk_zip.write(self.classes_dex, 'classes.dex')
# add all resource files from the project
for root, dirs, files in os.walk(self.project_src_dir, True, None, True):
remove_ignored_dirs(dirs)
for f in files:
if f in ignoreFiles:
continue
if os.path.splitext(f)[1] != '.java':
absolute_path = os.path.join(root, f)
relative_path = os.path.join(root[len(self.project_src_dir)+1:], f)
if is_modified(absolute_path) or not zip_contains(apk_zip, relative_path):
self.apk_updated = True
debug("resource file => " + relative_path)
apk_zip.write(os.path.join(root, f), relative_path, compression_type(f))
def add_resource_jar(jar_file):
jar = zipfile.ZipFile(jar_file)
for path in jar.namelist():
if skip_jar_path(path): continue
debug("from JAR %s => %s" % (jar_file, path))
apk_zip.writestr(zipinfo(path), jar.read(path))
jar.close()
for jar_file in self.module_jars:
add_resource_jar(jar_file)
for jar_file in self.android_jars:
add_resource_jar(jar_file)
def add_native_libs(libs_dir, exclude=[]):
if os.path.exists(libs_dir):
for abi_dir in os.listdir(libs_dir):
if abi_dir not in self.abis:
continue
libs_abi_dir = os.path.join(libs_dir, abi_dir)
if not os.path.isdir(libs_abi_dir): continue
for file in os.listdir(libs_abi_dir):
if file.endswith('.so') and file not in exclude:
native_lib = os.path.join(libs_abi_dir, file)
path_in_zip = '/'.join(['lib', abi_dir, file])
if is_modified(native_lib) or not zip_contains(apk_zip, path_in_zip):
self.apk_updated = True
debug("installing native lib: %s" % native_lib)
apk_zip.write(native_lib, path_in_zip)
# add module native libraries
for module in self.modules:
exclude_libs = []
add_native_libs(module.get_resource('libs'), exclude_libs)
# add any native libraries : libs/**/*.so -> lib/**/*.so
add_native_libs(os.path.join(self.project_dir, 'libs'))
# add sdk runtime native libraries
debug("installing native SDK libs")
sdk_native_libs = os.path.join(template_dir, 'native', 'libs')
for abi in self.abis:
lib_source_dir = os.path.join(sdk_native_libs, abi)
lib_dest_dir = 'lib/%s/' % abi
# libtiverify is always included
apk_zip.write(os.path.join(lib_source_dir, 'libtiverify.so'), lib_dest_dir + 'libtiverify.so')
# profiler
apk_zip.write(os.path.join(lib_source_dir, 'libtiprofiler.so'), lib_dest_dir + 'libtiprofiler.so')
for fname in ('libkroll-v8.so', 'libstlport_shared.so'):
apk_zip.write(os.path.join(lib_source_dir, fname), lib_dest_dir + fname)
self.apk_updated = True
apk_zip.close()
return unsigned_apk
def run_adb(self, *args):
command = [self.sdk.get_adb()]
command.extend(self.device_args)
command.extend(args)
return run.run(command)
def get_sigalg(self):
output = run.run([self.keytool,
'-v',
'-list',
'-keystore', self.keystore,
'-storepass', self.keystore_pass,
'-alias', self.keystore_alias
], protect_arg_positions=(6,))
# If the keytool encounters an error, that means some of the provided
# keychain info is invalid and we should bail anyway
run.check_output_for_error(output, r'RuntimeException: (.*)', True)
run.check_output_for_error(output, r'^keytool: (.*)', True)
match = re.search(r'Signature algorithm name: (.*)', output)
if match is not None:
return match.group(1)
# Return the default:
return "MD5withRSA"
def package_and_deploy(self):
# If in production mode and compiling JS, we do not package the JS
# files as assets (we protect them from prying eyes). But if a JS
# file is referenced in an html <script> tag, we DO need to package it.
def get_js_referenced_in_html():
js_files = []
for root, dirs, files in os.walk(self.assets_dir):
for one_file in files:
if one_file.lower().endswith(".html"):
full_path = os.path.join(root, one_file)
html_source = None
file_stream = None
try:
file_stream = open(full_path, "r")
html_source = file_stream.read()
except:
error("Unable to read html file '%s'" % full_path)
finally:
# file_stream is still None if open() itself failed, so guard the close
if file_stream:
file_stream.close()
if html_source:
parser = HTMLParser()
parser.parse(html_source)
relative_js_files = parser.get_referenced_js_files()
if relative_js_files:
for one_rel_js_file in relative_js_files:
if one_rel_js_file.startswith("http:") or one_rel_js_file.startswith("https:"):
continue
if one_rel_js_file.startswith("app://"):
one_rel_js_file = one_rel_js_file[6:]
js_files.append(os.path.abspath(os.path.join(os.path.dirname(full_path), one_rel_js_file)))
return js_files
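# e.g. <script src="app://lib/util.js"> in bin/assets/Resources/index.html
# resolves to <project>/bin/assets/Resources/lib/util.js (illustrative
# paths; http(s) URLs are skipped above and left alone).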
ap_ = os.path.join(self.project_dir, 'bin', 'app.ap_')
# This is only to check if this has been overridden in production
has_compile_js = self.tiappxml.has_app_property("ti.android.compilejs")
compile_js = not has_compile_js or (has_compile_js and \
self.tiappxml.to_bool(self.tiappxml.get_app_property('ti.android.compilejs')))
# JS files referenced in html files and thus likely needed for webviews.
webview_js_files = []
pkg_assets_dir = self.assets_dir
if self.deploy_type == "test":
compile_js = False
if compile_js and os.environ.has_key('SKIP_JS_MINIFY'):
compile_js = False
info("Disabling JavaScript minification")
if self.deploy_type == "production" and compile_js:
webview_js_files = get_js_referenced_in_html()
non_js_assets = os.path.join(self.project_dir, 'bin', 'non-js-assets')
if not os.path.exists(non_js_assets):
os.mkdir(non_js_assets)
copy_all(self.assets_dir, non_js_assets, ignore_exts=['.js'])
# if we have any js files referenced in html, we *do* need
# to package them as if they are non-js assets.
if webview_js_files:
for one_js_file in webview_js_files:
if os.path.exists(one_js_file):
dest_file = one_js_file.replace(self.assets_dir, non_js_assets, 1)
if not os.path.exists(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copyfile(one_js_file, dest_file)
pkg_assets_dir = non_js_assets
run.run([self.aapt, 'package', '-f', '-M', 'AndroidManifest.xml', '-A', pkg_assets_dir,
'-S', 'res', '-I', self.android_jar, '-I', self.titanium_jar, '-F', ap_], warning_regex=r'skipping')
unsigned_apk = self.create_unsigned_apk(ap_, webview_js_files)
if self.dist_dir:
app_apk = os.path.join(self.dist_dir, self.name + '.apk')
else:
app_apk = os.path.join(self.project_dir, 'bin', 'app.apk')
output = run.run([self.jarsigner,
'-sigalg', self.get_sigalg(),
'-digestalg', 'SHA1',
'-storepass', self.keystore_pass,
'-keystore', self.keystore,
'-signedjar', app_apk,
unsigned_apk,
self.keystore_alias], protect_arg_positions=(6,))
run.check_output_for_error(output, r'RuntimeException: (.*)', True)
run.check_output_for_error(output, r'^jarsigner: (.*)', True)
# TODO Document Exit message
#success = re.findall(r'RuntimeException: (.*)', output)
#if len(success) > 0:
# error(success[0])
# sys.exit(1)
# zipalign to align byte boundaries
zipalign = self.sdk.get_zipalign()
if os.path.exists(app_apk+'z'):
os.remove(app_apk+'z')
ALIGN_32_BIT = 4
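# 4-byte alignment lets the Android runtime mmap uncompressed APK entries
# directly; the call below is equivalent to running:
#   zipalign -v 4 app.apk app.apkz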
output = run.run([zipalign, '-v', str(ALIGN_32_BIT), app_apk, app_apk+'z'])
# TODO - Document Exit message
if output == None:
error("System Error while compiling Android classes.dex")
sys.exit(1)
else:
os.unlink(app_apk)
os.rename(app_apk+'z',app_apk)
if self.dist_dir:
self.post_build()
sys.exit()
if self.build_only:
return (False, False)
out = self.run_adb('get-state')
#out = subprocess.Popen([self.sdk.get_adb(), self.device_type_arg, 'get-state'], stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[0]
out = str(out).strip()
# try a few times as sometimes it fails waiting on boot
attempts = 0
launched = False
launch_failed = False
while attempts < 5:
try:
if self.install:
self.wait_for_device('d')
info("Installing application on device")
else:
self.wait_for_device('e')
info("Installing application on emulator")
output = self.run_adb('install', '-r', app_apk)
#output = run.run(cmd)
if output == None:
launch_failed = True
elif "Failure" in output:
error("Failed installing %s: %s" % (self.app_id, output))
launch_failed = True
elif not self.install:
launched = True
break
except Exception, e:
error(e)
time.sleep(3)
attempts+=1
return (launched, launch_failed)
def run_app(self):
info("Launching application ... %s" % self.name)
output = self.run_adb('shell', 'am', 'start',
'-a', 'android.intent.action.MAIN',
'-c','android.intent.category.LAUNCHER',
'-n', '%s/.%sActivity' % (self.app_id , self.classname),
'-f', '0x10200000')
trace("Launch output: %s" % output)
def wait_for_sdcard(self):
# Quick check: the existence of /sdcard/Android,
# which really should be there on all phones and emulators.
output = self.run_adb('shell', 'cd /sdcard/Android && echo SDCARD READY')
if 'SDCARD READY' in output:
return True
# Our old way of checking in case the above
# didn't succeed:
mount_points_check = ['/sdcard', '/mnt/sdcard']
# Check the symlink that is typically in root.
# If you find it, add its target to the mount points to check.
output = self.run_adb('shell', 'ls', '-l', '/sdcard')
if output:
target_pattern = r"\-\> (\S+)\s*$"
mount_points_check.extend(re.findall(target_pattern, output))
info("Waiting for SDCard to become available..")
waited = 0
max_wait = 60
while waited < max_wait:
output = self.run_adb('shell', 'mount')
if output != None:
mount_points = output.splitlines()
for mount_point in mount_points:
tokens = mount_point.split()
if len(tokens) < 2: continue
mount_path = tokens[1]
if mount_path in mount_points_check:
return True
else:
error("Error checking for SDCard using 'mount'")
return False
time.sleep(1)
waited += 1
error("Timed out waiting for SDCard to become available (%ds)" % max_wait)
return False
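# Sample 'adb shell mount' line tokenized by the loop above (assumption:
# typical emulator output; tokens[1] is the mount point):
#   /dev/block/vold/179:0 /mnt/sdcard vfat rw,dirsync,nosuid ...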
def push_deploy_json(self):
deploy_data = {
"debuggerEnabled": self.debugger_host != None,
"debuggerPort": self.debugger_port,
"profilerEnabled": self.profiler_host != None,
"profilerPort": self.profiler_port,
"fastdevPort": self.fastdev_port
}
deploy_json = os.path.join(self.project_dir, 'bin', 'deploy.json')
open(deploy_json, 'w+').write(simplejson.dumps(deploy_data))
sdcard_available = self.wait_for_sdcard()
if sdcard_available:
self.run_adb('shell', 'mkdir /sdcard/%s || echo' % self.app_id)
self.run_adb('push', deploy_json, '/sdcard/%s/deploy.json' % self.app_id)
os.unlink(deploy_json)
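# Hedged example of the deploy.json pushed above (actual values depend on
# the debugger/profiler/fastdev state of this particular build):
#   {"debuggerEnabled": false, "debuggerPort": -1, "profilerEnabled": false,
#    "profilerPort": -1, "fastdevPort": -1}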
def verify_fastdev(self):
lock_file = os.path.join(self.top_dir, '.fastdev.lock')
if not fastdev.is_running(self.top_dir):
if os.path.exists(lock_file):
os.unlink(lock_file)
return False
else:
data = simplejson.loads(open(lock_file, 'r').read())
self.fastdev_port = data["port"]
return True
def fastdev_kill_app(self):
lock_file = os.path.join(self.top_dir, ".fastdev.lock")
if os.path.exists(lock_file):
class Options(object): pass
options = Options()
options.lock_file = lock_file
try:
return fastdev.kill_app(self.top_dir, options)
except Exception, e:
return False
def merge_internal_module_resources(self):
if not self.android_jars:
return
for jar in self.android_jars:
if not os.path.exists(jar):
continue
res_zip = jar[:-4] + '.res.zip'
if not os.path.exists(res_zip):
continue
res_zip_file = zipfile.ZipFile(res_zip, "r")
try:
zip_extractall(res_zip_file, self.project_dir)
finally:
res_zip_file.close()
def build_and_run(self, install, avd_id, keystore=None, keystore_pass='tirocks', keystore_alias='tidev', dist_dir=None, build_only=False, device_args=None, debugger_host=None, profiler_host=None):
deploy_type = 'development'
self.build_only = build_only
self.device_args = device_args
self.postbuild_modules = []
self.finalize_modules = []
self.non_orphans = []
if install:
if self.device_args == None:
self.device_args = ['-d']
if keystore == None:
deploy_type = 'test'
else:
deploy_type = 'production'
if self.device_args == None:
self.device_args = ['-e']
self.deploy_type = deploy_type
(java_failed, java_status) = prereq.check_java()
if java_failed:
error(java_status)
sys.exit(1)
# attempt to load any compiler plugins
if len(self.tiappxml.properties['plugins']) > 0:
titanium_dir = os.path.abspath(os.path.join(template_dir,'..','..','..','..'))
local_compiler_dir = os.path.abspath(os.path.join(self.top_dir,'plugins'))
tp_compiler_dir = os.path.abspath(os.path.join(titanium_dir,'plugins'))
if not os.path.exists(tp_compiler_dir) and not os.path.exists(local_compiler_dir):
error("Build Failed (Missing plugins directory)")
sys.exit(1)
compiler_config = {
'platform':'android',
'tiapp':self.tiappxml,
'project_dir':self.top_dir,
'titanium_dir':titanium_dir,
'appid':self.app_id,
'template_dir':template_dir,
'project_name':self.name,
'command':self.command,
'build_dir':self.project_dir,
'app_name':self.name,
'android_builder':self,
'deploy_type':deploy_type,
'dist_dir':dist_dir,
'logger':log
}
for plugin in self.tiappxml.properties['plugins']:
local_plugin_file = os.path.join(local_compiler_dir,plugin['name'],'plugin.py')
plugin_file = os.path.join(tp_compiler_dir,plugin['name'],plugin['version'],'plugin.py')
info("plugin=%s" % plugin_file)
if not os.path.exists(local_plugin_file) and not os.path.exists(plugin_file):
error("Build Failed (Missing plugin for %s)" % plugin['name'])
sys.exit(1)
info("Detected compiler plugin: %s/%s" % (plugin['name'],plugin['version']))
code_path = plugin_file
if os.path.exists(local_plugin_file):
code_path = local_plugin_file
compiler_config['plugin']=plugin
fin = open(code_path, 'rb')
m = hashlib.md5()
m.update(open(code_path,'rb').read())
code_hash = m.hexdigest()
p = imp.load_source(code_hash, code_path, fin)
module_functions = dict(inspect.getmembers(p, inspect.isfunction))
if module_functions.has_key('postbuild'):
debug("plugin contains a postbuild function. Will execute after project is built and packaged")
self.postbuild_modules.append((plugin['name'], p))
if module_functions.has_key('finalize'):
debug("plugin contains a finalize function. Will execute before script exits")
self.finalize_modules.append((plugin['name'], p))
p.compile(compiler_config)
fin.close()
# in Windows, if the adb server isn't running, calling "adb devices"
# will fork off a new adb server, and cause a lock-up when we
# try to pipe the process' stdout/stderr. the workaround is
# to simply call adb start-server here, and not care about
# the return code / pipes. (this is harmless if adb is already running)
# -- thanks to Bill Dawson for the workaround
if platform.system() == "Windows" and not build_only:
run.run([self.sdk.get_adb(), "start-server"], True, ignore_output=True)
ti_version_file = os.path.join(self.support_dir, '..', 'version.txt')
if os.path.exists(ti_version_file):
ti_version_info = read_properties(open(ti_version_file, 'r'), '=')
if not ti_version_info is None and 'version' in ti_version_info:
ti_version_string = 'Titanium SDK version: %s' % ti_version_info['version']
if 'timestamp' in ti_version_info or 'githash' in ti_version_info:
ti_version_string += ' ('
if 'timestamp' in ti_version_info:
ti_version_string += '%s' % ti_version_info['timestamp']
if 'githash' in ti_version_info:
ti_version_string += ' %s' % ti_version_info['githash']
ti_version_string += ')'
info(ti_version_string)
if not build_only:
if deploy_type == 'development':
self.wait_for_device('e')
elif deploy_type == 'test':
self.wait_for_device('d')
self.install = install
self.dist_dir = dist_dir
self.aapt = self.sdk.get_aapt()
self.android_jar = self.sdk.get_android_jar()
self.titanium_jar = os.path.join(self.support_dir,'titanium.jar')
self.kroll_apt_jar = os.path.join(self.support_dir, 'kroll-apt.jar')
dx = self.sdk.get_dx()
self.apkbuilder = self.sdk.get_apkbuilder()
self.sdcard_resources = '/sdcard/Ti.debug/%s/Resources' % self.app_id
self.resources_installed = False
if deploy_type == "production":
self.app_installed = False
else:
self.app_installed = not build_only and self.is_app_installed()
debug("%s installed? %s" % (self.app_id, self.app_installed))
#self.resources_installed = not build_only and self.are_resources_installed()
#debug("%s resources installed? %s" % (self.app_id, self.resources_installed))
if keystore == None:
keystore = os.path.join(self.support_dir,'dev_keystore')
self.keystore = keystore
self.keystore_pass = keystore_pass
self.keystore_alias = keystore_alias
curdir = os.getcwd()
self.support_resources_dir = os.path.join(self.support_dir, 'resources')
try:
os.chdir(self.project_dir)
self.android = Android(self.name, self.app_id, self.sdk, deploy_type, self.java)
if not os.path.exists('bin'):
os.makedirs('bin')
resources_dir = os.path.join(self.top_dir,'Resources')
self.assets_dir = os.path.join(self.project_dir,'bin','assets')
self.assets_resources_dir = os.path.join(self.assets_dir,'Resources')
if not os.path.exists(self.assets_resources_dir):
os.makedirs(self.assets_resources_dir)
shutil.copy(self.project_tiappxml, self.assets_dir)
finalxml = os.path.join(self.assets_dir,'tiapp.xml')
self.tiapp = TiAppXML(finalxml)
self.tiapp.setDeployType(deploy_type)
self.sdcard_copy = False
sdcard_property = "ti.android.loadfromsdcard"
if self.tiapp.has_app_property(sdcard_property):
self.sdcard_copy = self.tiapp.to_bool(self.tiapp.get_app_property(sdcard_property))
fastdev_property = "ti.android.fastdev"
fastdev_enabled = (self.deploy_type == 'development' and not self.build_only)
if self.tiapp.has_app_property(fastdev_property) and self.deploy_type == 'development':
fastdev_enabled = self.tiapp.to_bool(self.tiapp.get_app_property(fastdev_property))
if fastdev_enabled:
if self.verify_fastdev():
info("Fastdev server running, deploying in Fastdev mode")
self.fastdev = True
else:
warn("Fastdev enabled, but server isn't running, deploying normally")
self.classes_dir = os.path.join(self.project_dir, 'bin', 'classes')
if not os.path.exists(self.classes_dir):
os.makedirs(self.classes_dir)
if (not debugger_host is None) and len(debugger_host) > 0:
hostport = debugger_host.split(":")
self.debugger_host = hostport[0]
self.debugger_port = int(hostport[1])
debugger_enabled = self.debugger_host != None and len(self.debugger_host) > 0
if (not profiler_host is None) and len(profiler_host) > 0:
hostport = profiler_host.split(":")
self.profiler_host = hostport[0]
self.profiler_port = int(hostport[1])
profiler_enabled = self.profiler_host != None and len(self.profiler_host) > 0
# Detect which modules are being used.
# We need to know this info in a few places, so the info is saved
# in self.missing_modules and self.modules
detector = ModuleDetector(self.top_dir)
self.missing_modules, self.modules = detector.find_app_modules(self.tiapp, 'android', deploy_type)
self.copy_commonjs_modules()
self.copy_project_resources()
last_build_info = None
built_all_modules = False
build_info_path = os.path.join(self.project_dir, 'bin', 'build_info.json')
if os.path.exists(build_info_path):
last_build_info = simplejson.loads(open(build_info_path, 'r').read())
built_all_modules = last_build_info["include_all_modules"]
if self.tiapp.has_app_property("ti.android.compilejs"):
if self.tiapp.to_bool(self.tiapp.get_app_property('ti.android.compilejs')):
self.compile_js = True
elif self.tiapp.has_app_property('ti.deploytype'):
if self.tiapp.get_app_property('ti.deploytype') == 'production':
self.compile_js = True
if self.compile_js and os.environ.has_key('SKIP_JS_MINIFY'):
self.compile_js = False
info("Disabling JavaScript minification")
include_all_ti_modules = self.fastdev
if (self.tiapp.has_app_property('ti.android.include_all_modules')):
if self.tiapp.to_bool(self.tiapp.get_app_property('ti.android.include_all_modules')):
include_all_ti_modules = True
if self.tiapp_changed or (self.js_changed and not self.fastdev) or \
self.force_rebuild or self.deploy_type == "production" or \
(self.fastdev and not built_all_modules) or \
(not self.fastdev and built_all_modules):
self.android.config['compile_js'] = self.compile_js
trace("Generating Java Classes")
self.android.create(os.path.abspath(os.path.join(self.top_dir,'..')),
True, project_dir = self.top_dir, include_all_ti_modules=include_all_ti_modules)
open(build_info_path, 'w').write(simplejson.dumps({
"include_all_modules": include_all_ti_modules
}))
else:
info("Tiapp.xml unchanged, skipping class generation")
# compile resources
full_resource_dir = os.path.join(self.project_dir, self.assets_resources_dir)
compiler = Compiler(self.tiapp,
full_resource_dir,
self.java,
self.classes_dir,
self.project_gen_dir,
self.project_dir,
include_all_modules=include_all_ti_modules)
compiler.compile(compile_bytecode=self.compile_js, external_modules=self.modules)
self.compiled_files = compiler.compiled_files
self.android_jars = compiler.jar_libraries
self.merge_internal_module_resources()
if not os.path.exists(self.assets_dir):
os.makedirs(self.assets_dir)
self.resource_drawables_changed = self.copy_resource_drawables()
self.warn_dupe_drawable_folders()
self.copy_module_platform_folders()
special_resources_dir = os.path.join(self.top_dir,'platform','android')
if os.path.exists(special_resources_dir):
debug("found special platform files dir = %s" % special_resources_dir)
ignore_files = ignoreFiles
ignore_files.extend(['AndroidManifest.xml']) # don't want to overwrite build/android/AndroidManifest.xml yet
self.copy_project_platform_folder(ignoreDirs, ignore_files)
self.generate_stylesheet()
self.generate_aidl()
self.manifest_changed = self.generate_android_manifest(compiler)
my_avd = None
self.google_apis_supported = False
# find the AVD we've selected and determine if we support Google APIs
if avd_id is not None:
for avd_props in avd.get_avds(self.sdk):
if avd_props['id'] == avd_id:
my_avd = avd_props
self.google_apis_supported = (my_avd['name'].find('Google')!=-1 or my_avd['name'].find('APIs')!=-1)
break
if build_only or avd_id is None:
self.google_apis_supported = True
remove_orphaned_files(resources_dir, self.assets_resources_dir, self.non_orphans)
generated_classes_built = self.build_generated_classes()
# TODO: enable for "test" / device mode for debugger / fastdev
if not self.build_only and (self.deploy_type == "development" or self.deploy_type == "test"):
self.push_deploy_json()
self.classes_dex = os.path.join(self.project_dir, 'bin', 'classes.dex')
def jar_includer(path, isfile):
if isfile and path.endswith(".jar"): return True
return False
support_deltafy = Deltafy(self.support_dir, jar_includer)
self.support_deltas = support_deltafy.scan()
dex_built = False
if len(self.support_deltas) > 0 or generated_classes_built or self.deploy_type == "production":
# the dx.bat that ships with android in windows doesn't allow command line
# overriding of the java heap space, so we call the jar directly
if platform.system() == 'Windows':
dex_args = [self.java, '-Xmx1024M', '-Djava.ext.dirs=%s' % self.sdk.get_platform_tools_dir(), '-jar', self.sdk.get_dx_jar()]
else:
dex_args = [dx, '-JXmx1536M', '-JXX:-UseGCOverheadLimit']
# Look for New Relic module
newrelic_module = None
for module in self.modules:
if module.path.find("newrelic") > 0:
newrelic_module = module
break
# If New Relic is present, add its Java agent to the dex arguments.
if newrelic_module:
info("Adding New Relic support.")
# Copy the dexer java agent jar to a tempfile. This eliminates whitespace
# in the module path, which breaks the dex -Jjavaagent argument.
temp_jar = tempfile.NamedTemporaryFile(suffix='.jar', delete=True)
shutil.copyfile(os.path.join(newrelic_module.path, 'class.rewriter.jar'), temp_jar.name)
dex_args += ['-Jjavaagent:' + os.path.join(temp_jar.name)]
dex_args += ['--dex', '--output='+self.classes_dex, self.classes_dir]
dex_args += self.android_jars
dex_args += self.module_jars
dex_args.append(os.path.join(self.support_dir, 'lib', 'titanium-verify.jar'))
if self.deploy_type != 'production':
dex_args.append(os.path.join(self.support_dir, 'lib', 'titanium-debug.jar'))
dex_args.append(os.path.join(self.support_dir, 'lib', 'titanium-profiler.jar'))
# the verifier depends on Ti.Network classes, so we may need to inject it
has_network_jar = False
for jar in self.android_jars:
if jar.endswith('titanium-network.jar'):
has_network_jar = True
break
if not has_network_jar:
dex_args.append(os.path.join(self.support_dir, 'modules', 'titanium-network.jar'))
info("Compiling Android Resources... This could take some time")
# TODO - Document Exit message
run_result = run.run(dex_args, warning_regex=r'warning: ')
if (run_result == None):
dex_built = False
error("System Error while compiling Android classes.dex")
sys.exit(1)
else:
dex_built = True
debug("Android classes.dex built")
if dex_built or generated_classes_built or self.tiapp_changed or self.manifest_changed or not self.app_installed or not self.fastdev:
# metadata has changed, we need to do a full re-deploy
launched, launch_failed = self.package_and_deploy()
if launched:
self.run_app()
info("Deployed %s ... Application should be running." % self.name)
elif launch_failed==False and not build_only:
info("Application installed. Launch from drawer on Home Screen")
elif not build_only:
# Relaunch app if nothing was built
info("Re-launching application ... %s" % self.name)
relaunched = False
killed = False
if self.fastdev:
killed = self.fastdev_kill_app()
if not killed:
processes = self.run_adb('shell', 'ps')
for line in processes.splitlines():
columns = line.split()
if len(columns) > 1:
pid = columns[1]
process_name = columns[-1]
if process_name == self.app_id:
self.run_adb('shell', 'kill', pid)
relaunched = True
self.run_app()
if relaunched:
info("Relaunched %s ... Application should be running." % self.name)
self.post_build()
# Enable port forwarding for debugger if application
# acts as the server.
if debugger_enabled:
info('Forwarding host port %s to device for debugging.' % self.debugger_port)
forwardPort = 'tcp:%s' % self.debugger_port
self.sdk.run_adb(['forward', forwardPort, forwardPort])
# Enable port forwarding for profiler
if profiler_enabled:
info('Forwarding host port %s to device for profiling.' % self.profiler_port)
forwardPort = 'tcp:%s' % self.profiler_port
self.sdk.run_adb(['forward', forwardPort, forwardPort])
#intermediary code for on-device debugging (later)
#if debugger_host != None:
#import debugger
#debug("connecting to debugger: %s, debugger=%s" % (debugger_host, str(debugger)))
#debugger.run(debugger_host, '127.0.0.1:5999')
finally:
os.chdir(curdir)
sys.stdout.flush()
def post_build(self):
try:
if self.postbuild_modules:
for p in self.postbuild_modules:
info("Running postbuild function in %s plugin" % p[0])
p[1].postbuild()
except Exception,e:
error("Error performing post-build steps: %s" % e)
def finalize(self):
try:
if self.finalize_modules:
for p in self.finalize_modules:
info("Running finalize function in %s plugin" % p[0])
p[1].finalize()
except Exception,e:
error("Error performing finalize steps: %s" % e)
if __name__ == "__main__":
def usage():
print "%s <command> <project_name> <sdk_dir> <project_dir> <app_id> [key] [password] [alias] [dir] [avdid] [avdskin] [avdabi] [emulator options]" % os.path.basename(sys.argv[0])
print
print "available commands: "
print
print " emulator build and run the emulator"
print " simulator build and run the app on the simulator"
print " install build and install the app on the device"
print " distribute build final distribution package for upload to marketplace"
print " run build and run the project using values from tiapp.xml"
print " run-emulator run the emulator with a default AVD ID and skin"
sys.exit(1)
argc = len(sys.argv)
if argc < 2:
usage()
command = sys.argv[1]
if command == 'logcat':
launch_logcat()
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
get_values_from_tiapp = False
is_emulator = False
if command == 'run':
if argc < 4:
print 'Usage: %s run <project_dir> <android_sdk>' % sys.argv[0]
sys.exit(1)
get_values_from_tiapp = True
project_dir = sys.argv[2]
sdk_dir = sys.argv[3]
avd_id = "7"
elif command == 'run-emulator':
if argc < 4:
print 'Usage: %s run-emulator <project_dir> <android_sdk>' % sys.argv[0]
sys.exit(1)
get_values_from_tiapp = True
project_dir = sys.argv[2]
sdk_dir = sys.argv[3]
# sensible defaults?
avd_id = "7"
avd_skin = "HVGA"
else:
if command == 'emulator':
is_emulator = True
if argc < 6 or command == '--help' or (command=='distribute' and argc < 10):
usage()
if get_values_from_tiapp:
tiappxml = TiAppXML(os.path.join(project_dir, 'tiapp.xml'))
app_id = tiappxml.properties['id']
project_name = tiappxml.properties['name']
else:
project_name = dequote(sys.argv[2])
sdk_dir = os.path.abspath(os.path.expanduser(dequote(sys.argv[3])))
project_dir = os.path.abspath(os.path.expanduser(dequote(sys.argv[4])))
app_id = dequote(sys.argv[5])
log = TiLogger(os.path.join(os.path.abspath(os.path.expanduser(dequote(project_dir))), 'build.log'))
log.debug(" ".join(sys.argv))
builder = Builder(project_name,sdk_dir,project_dir,template_dir,app_id,is_emulator)
builder.command = command
try:
if command == 'run-emulator':
builder.run_emulator(avd_id, avd_skin, None, None, [])
elif command == 'run':
builder.build_and_run(False, avd_id)
elif command == 'emulator':
avd_id = dequote(sys.argv[6])
add_args = None
avd_abi = None
avd_skin = None
avd_name = None
if avd_id.isdigit():
avd_name = None
avd_skin = dequote(sys.argv[7])
if argc > 8:
# The first of the remaining args
# could either be an abi or an additional argument for
# the emulator. Compare to known abis.
next_index = 8
test_arg = sys.argv[next_index]
if test_arg in KNOWN_ABIS:
avd_abi = test_arg
next_index += 1
# Whatever remains (if anything) is an additional
# argument to pass to the emulator.
if argc > next_index:
add_args = sys.argv[next_index:]
else:
avd_name = sys.argv[6]
# If the avd is known by name, then the skin and abi shouldn't be passed,
# because the avd already has the skin and abi "in it".
avd_id = None
avd_skin = None
avd_abi = None
if argc > 7:
add_args = sys.argv[7:]
builder.run_emulator(avd_id, avd_skin, avd_name, avd_abi, add_args)
elif command == 'simulator':
info("Building %s for Android ... one moment" % project_name)
avd_id = dequote(sys.argv[6])
debugger_host = None
profiler_host = None
if len(sys.argv) > 9 and sys.argv[9] == 'profiler':
profiler_host = dequote(sys.argv[8])
elif len(sys.argv) > 8:
debugger_host = dequote(sys.argv[8])
builder.build_and_run(False, avd_id, debugger_host=debugger_host, profiler_host=profiler_host)
elif command == 'install':
avd_id = dequote(sys.argv[6])
device_args = ['-d']
# We have to be careful here because Windows can't handle an empty argument
# on the command line, so if a device serial number is not passed in, but
# a debugger_host (the argument after device serial number) _is_ passed in,
# to Windows it just looks like a serial number is passed in (the debugger_host
# argument shifts left to take over the empty argument.)
debugger_host = None
profiler_host = None
if len(sys.argv) >= 10 and sys.argv[9] == 'profiler':
profiler_host = dequote(sys.argv[8])
if len(sys.argv[7]) > 0:
device_args = ['-s', sys.argv[7]]
elif len(sys.argv) >= 9 and len(sys.argv[8]) > 0:
debugger_host = dequote(sys.argv[8])
if len(sys.argv[7]) > 0:
device_args = ['-s', sys.argv[7]]
elif len(sys.argv) >= 8 and len(sys.argv[7]) > 0:
arg7 = dequote(sys.argv[7])
if 'adb:' in arg7:
debugger_host = arg7
else:
device_args = ['-s', arg7]
builder.build_and_run(True, avd_id, device_args=device_args, debugger_host=debugger_host, profiler_host=profiler_host)
elif command == 'distribute':
key = os.path.abspath(os.path.expanduser(dequote(sys.argv[6])))
password = dequote(sys.argv[7])
alias = dequote(sys.argv[8])
output_dir = dequote(sys.argv[9])
builder.build_and_run(True, None, key, password, alias, output_dir)
elif command == 'build':
builder.build_and_run(False, 1, build_only=True)
else:
error("Unknown command: %s" % command)
usage()
except SystemExit, n:
sys.exit(n)
except:
e = traceback.format_exc()
error("Exception occured while building Android project:")
for line in e.splitlines():
error(line)
sys.exit(1)
finally:
# Don't run plugin finalizer functions if all we were doing is
# starting up the emulator.
if builder and command not in ("emulator", "run-emulator"):
builder.finalize()
| apache-2.0 |
mgron/cf-api | ping.py | 1 | 1031 | from wheezy.http import HTTPResponse
from wheezy.http import WSGIApplication
from wheezy.routing import url
from wheezy.web.handlers import BaseHandler
from wheezy.web.middleware import bootstrap_defaults
from wheezy.web.middleware import path_routing_middleware_factory
class PingHandler(BaseHandler):
def get(self):
response = HTTPResponse()
response.write('pong')
return response
def ping(request):
response = HTTPResponse()
response.write('poong')
return response
all_urls = [
url('', PingHandler, name='default'),
url('ping', ping, name='ping')
]
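# Usage sketch (assumption: server started via the __main__ block below):
#   curl http://localhost:8081/     -> "pong"  (PingHandler.get)
#   curl http://localhost:8081/ping -> "poong" (plain WSGI ping view)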
options = {}
main = WSGIApplication(
middleware=[
bootstrap_defaults(url_mapping=all_urls),
path_routing_middleware_factory
],
options=options
)
if __name__ == '__main__':
from wsgiref.simple_server import make_server
try:
print('Visit http://localhost:8081/')
make_server('', 8081, main).serve_forever()
except KeyboardInterrupt:
pass
print('\nThanks!')
| mit |
crobinso/virt-manager | virtManager/object/storagepool.py | 2 | 8736 | # Copyright (C) 2008, 2013 Red Hat, Inc.
# Copyright (C) 2008 Cole Robinson <crobinso@redhat.com>
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import time
from virtinst import log
from virtinst import pollhelpers
from virtinst import StoragePool, StorageVolume
from .libvirtobject import vmmLibvirtObject
def _pretty_bytes(val):
val = int(val)
if val > (1024 * 1024 * 1024):
return "%2.2f GiB" % (val / (1024.0 * 1024.0 * 1024.0))
else:
return "%2.2f MiB" % (val / (1024.0 * 1024.0))
POOL_TYPE_DESCS = {
StoragePool.TYPE_DIR: _("Filesystem Directory"),
StoragePool.TYPE_FS: _("Pre-Formatted Block Device"),
StoragePool.TYPE_NETFS: _("Network Exported Directory"),
StoragePool.TYPE_LOGICAL: _("LVM Volume Group"),
StoragePool.TYPE_DISK: _("Physical Disk Device"),
StoragePool.TYPE_ISCSI: _("iSCSI Target"),
StoragePool.TYPE_SCSI: _("SCSI Host Adapter"),
StoragePool.TYPE_MPATH: _("Multipath Device Enumerator"),
StoragePool.TYPE_GLUSTER: _("Gluster Filesystem"),
StoragePool.TYPE_RBD: _("RADOS Block Device/Ceph"),
StoragePool.TYPE_SHEEPDOG: _("Sheepdog Filesystem"),
StoragePool.TYPE_ZFS: _("ZFS Pool"),
}
class vmmStorageVolume(vmmLibvirtObject):
def __init__(self, conn, backend, key):
vmmLibvirtObject.__init__(self, conn, backend, key, StorageVolume)
##########################
# Required class methods #
##########################
def _conn_tick_poll_param(self):
return None # pragma: no cover
def class_name(self):
return "volume" # pragma: no cover
def _XMLDesc(self, flags):
try:
return self._backend.XMLDesc(flags)
except Exception as e: # pragma: no cover
log.debug("XMLDesc for vol=%s failed: %s",
self._backend.key(), e)
raise
def _get_backend_status(self):
return self._STATUS_ACTIVE
###########
# Actions #
###########
def get_parent_pool(self):
name = self._backend.storagePoolLookupByVolume().name()
for pool in self.conn.list_pools():
if pool.get_name() == name:
return pool
def delete(self, force=True):
ignore = force
self._backend.delete(0)
self._backend = None
#################
# XML accessors #
#################
def get_key(self):
return self.get_xmlobj().key or ""
def get_target_path(self):
return self.get_xmlobj().target_path or ""
def get_format(self):
return self.get_xmlobj().format
def get_capacity(self):
return self.get_xmlobj().capacity
def get_pretty_capacity(self):
return _pretty_bytes(self.get_capacity())
def get_pretty_name(self, pooltype):
name = self.get_name()
if pooltype != "iscsi":
return name
key = self.get_key()
ret = name
if key:
ret += " (%s)" % key
return ret
class vmmStoragePool(vmmLibvirtObject):
__gsignals__ = {
"refreshed": (vmmLibvirtObject.RUN_FIRST, None, [])
}
@staticmethod
def supports_volume_creation(pool_type, clone=False):
"""
Returns whether the pool supports volume creation. If @clone is set to
True, returns whether the pool supports volume cloning (virVolCreateXMLFrom).
"""
supported = [
StoragePool.TYPE_DIR,
StoragePool.TYPE_FS,
StoragePool.TYPE_NETFS,
StoragePool.TYPE_DISK,
StoragePool.TYPE_LOGICAL,
StoragePool.TYPE_RBD,
]
if not clone:
supported.extend([
StoragePool.TYPE_SHEEPDOG,
StoragePool.TYPE_ZFS,
])
return pool_type in supported
@staticmethod
def pretty_type(pool_type):
return POOL_TYPE_DESCS.get(pool_type, "%s pool" % pool_type)
@staticmethod
def list_types():
return sorted(list(POOL_TYPE_DESCS.keys()))
def __init__(self, conn, backend, key):
vmmLibvirtObject.__init__(self, conn, backend, key, StoragePool)
self._last_refresh_time = 0
self._volumes = None
##########################
# Required class methods #
##########################
def _conn_tick_poll_param(self):
return "pollpool"
def class_name(self):
return "pool"
def _XMLDesc(self, flags):
return self._backend.XMLDesc(flags)
def _define(self, xml):
return self.conn.define_pool(xml)
def _using_events(self):
return self.conn.using_storage_pool_events
def _get_backend_status(self):
return (bool(self._backend.isActive()) and
self._STATUS_ACTIVE or
self._STATUS_INACTIVE)
def _init_libvirt_state(self):
super()._init_libvirt_state()
if not self.conn.is_active():
# We only want to refresh a pool on initial conn startup,
# since the pools may be out of date. But if a storage pool
# shows up while the conn is connected, this means it was
# just 'defined' recently and doesn't need to be refreshed.
self.refresh(_from_object_init=True)
for vol in self.get_volumes():
vol.init_libvirt_state()
def _invalidate_xml(self):
vmmLibvirtObject._invalidate_xml(self)
self._volumes = None
def _cleanup(self):
vmmLibvirtObject._cleanup(self)
for vol in self._volumes:
vol.cleanup()
self._volumes = None
###########
# Actions #
###########
@vmmLibvirtObject.lifecycle_action
def start(self):
self._backend.create(0)
@vmmLibvirtObject.lifecycle_action
def stop(self):
self._backend.destroy()
@vmmLibvirtObject.lifecycle_action
def delete(self, force=True):
ignore = force
self._backend.undefine()
self._backend = None
def refresh(self, _from_object_init=False):
"""
:param _from_object_init: Only used for the refresh() call from
_init_libvirt_state. Tells us to not refresh the XML, since
we just updated it.
"""
if not self.is_active():
return # pragma: no cover
self._backend.refresh(0)
if self._using_events() and not _from_object_init:
# If we are using events, we let the event loop trigger
# the cache update for us. Except if from init_libvirt_state,
# we want the update to be done immediately
return
self.refresh_pool_cache_from_event_loop(
_from_object_init=_from_object_init)
def refresh_pool_cache_from_event_loop(self, _from_object_init=False):
if not _from_object_init:
self.recache_from_event_loop()
self._update_volumes(force=True)
self.idle_emit("refreshed")
self._last_refresh_time = time.time()
def secs_since_last_refresh(self):
return time.time() - self._last_refresh_time
###################
# Volume handling #
###################
def get_volume_by_name(self, name):
for vol in self.get_volumes():
if vol.get_name() == name:
return vol
def get_volumes(self):
self._update_volumes(force=False)
return self._volumes[:]
def _update_volumes(self, force):
if not self.is_active():
self._volumes = []
return
if not force and self._volumes is not None:
return
keymap = dict((o.get_name(), o) for o in self._volumes or [])
def cb(obj, key):
return vmmStorageVolume(self.conn, obj, key)
(dummy1, dummy2, allvols) = pollhelpers.fetch_volumes(
self.conn.get_backend(), self.get_backend(), keymap, cb)
self._volumes = allvols
#########################
# XML/config operations #
#########################
def set_autostart(self, value):
self._backend.setAutostart(value)
def get_autostart(self):
return self._backend.autostart()
def get_type(self):
return self.get_xmlobj().type
def get_target_path(self):
return self.get_xmlobj().target_path or ""
def get_allocation(self):
return self.get_xmlobj().allocation
def get_available(self):
return self.get_xmlobj().available
def get_capacity(self):
return self.get_xmlobj().capacity
def get_pretty_allocation(self):
return _pretty_bytes(self.get_allocation())
def get_pretty_available(self):
return _pretty_bytes(self.get_available())
| gpl-2.0 |
nicolaoun/NS3-AM-Proto-Simulation | utils/tests/test-test.py | 77 | 4843 | #! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2014 Siddharth Santurkar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# NOTE: Run this script with the Python3 interpreter if the python3 compatibility
# of the ns-3 unit test runner needs to be tested.
# The following options of test.py are being tested for poratability by this script.
# To see the options supported by this script, run with the -h option on the command line
#
# -h, --help show this help message and exit
# -b BUILDPATH, --buildpath=BUILDPATH
# specify the path where ns-3 was built (defaults to the
# build directory for the current variant)
# -c KIND, --constrain=KIND
# constrain the test-runner by kind of test
# -d, --duration print the duration of each test suite and example
# -e EXAMPLE, --example=EXAMPLE
# specify a single example to run (no relative path is
# needed)
# -u, --update-data If examples use reference data files, get them to re-
# generate them
# -f FULLNESS, --fullness=FULLNESS
# choose the duration of tests to run: QUICK, EXTENSIVE,
# or TAKES_FOREVER, where EXTENSIVE includes QUICK and
# TAKES_FOREVER includes QUICK and EXTENSIVE (only QUICK
# tests are run by default)
# -g, --grind run the test suites and examples using valgrind
# -k, --kinds print the kinds of tests available
# -l, --list print the list of known tests
# -m, --multiple report multiple failures from test suites and test
# cases
# -n, --nowaf do not run waf before starting testing
# -p PYEXAMPLE, --pyexample=PYEXAMPLE
# specify a single python example to run (with relative
# path)
# -r, --retain retain all temporary files (which are normally
# deleted)
# -s TEST-SUITE, --suite=TEST-SUITE
# specify a single test suite to run
# -t TEXT-FILE, --text=TEXT-FILE
# write detailed test results into TEXT-FILE.txt
# -v, --verbose print progress and informational messages
# -w HTML-FILE, --web=HTML-FILE, --html=HTML-FILE
# write detailed test results into HTML-FILE.html
# -x XML-FILE, --xml=XML-FILE
# write detailed test results into XML-FILE.xml
from __future__ import print_function
from TestBase import TestBaseClass
import sys
def main(argv):
"""
Prepares the test cases and executes them.
"""
test_cases = [
'',
'-h',
'--help',
'-b build/',
'--buildpath=build/',
'-c performance',
'--constrain=performance',
'-d',
'--duration',
'-e socket-options-ipv6',
'--example=socket-options-ipv6',
'-u',
'--update-data',
'-f EXTENSIVE',
'--fullness=EXTENSIVE',
'-g',
'--grind',
'-l',
'--list',
'-m',
'--multiple',
'-n',
'--nowaf',
'-p first',
'--pyexample=first',
'-r',
'--retain',
'-s ns3-tcp-interoperability',
'--suite=ns3-tcp-interoperability',
'-t t_opt.txt',
'--text=t_opt.txt && rm -rf t_opt.txt',
'-v',
'--verbose',
'-w t_opt.html && rm -rf t_opt.html',
'--web=t_opt.html && rm -rf t_opt.html',
'--html=t_opt.html && rm -rf t_opt.html',
'-x t_opt.xml && rm -rf t_opt.xml',
'--xml=t_opt.xml && rm -rf t_opt.xml',
]
configure_string = sys.executable + ' waf configure --enable-tests --enable-examples'
clean_string = sys.executable + ' waf clean'
cmd_execute_list = [ '%s && %s test.py %s && %s' % (configure_string, sys.executable, option, clean_string) for option in test_cases]
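# Example of a single generated command (assumption: python3 interpreter;
# one such string is produced per entry in test_cases):
#   /usr/bin/python3 waf configure --enable-tests --enable-examples && \
#   /usr/bin/python3 test.py -d && /usr/bin/python3 waf clean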
runner = TestBaseClass(argv[1:], "Test suite for the ns-3 unit test runner" , 'test-py')
return runner.runtests(cmd_execute_list)
if __name__ == '__main__':
sys.exit(main(sys.argv)) | gpl-2.0 |
pyupio/octohook | hook/hook.py | 2 | 3954 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import imp
import hmac
import hashlib
import six
from flask import Flask, abort, request
DEBUG = os.environ.get("DEBUG", False) == 'True'
HOST = os.environ.get("HOST", '0.0.0.0')
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
REPO_DIR = os.path.join(ROOT_DIR, "repos")
GITHUB_EVENTS = [
"commit_comment",
"create",
"delete",
"deployment",
"deployment_status",
"fork",
"gollum",
"issue_comment",
"issues",
"member",
"membership",
"page_build",
"public",
"pull_request_review_comment",
"pull_request",
"push",
"repository",
"release",
"status",
"team_add",
"watch",
"ping", # sent by github to check if the endpoint is available
]
app = Flask(__name__)
def hook(repo):
"""Processes an incoming webhook, see GITHUB_EVENTS for possible events.
"""
event, signature = (
request.headers.get('X-Github-Event', False),
request.headers.get('X-Hub-Signature', False)
)
# If we are not running on DEBUG, the X-Hub-Signature header has to be set.
# Raising a 404 is not the right HTTP status code, but we don't
# want to give someone who is attacking this endpoint a clue
# that we are serving this repo at all if they don't
# know our secret key.
if not DEBUG:
if not signature:
abort(404)
# Check that the payload is signed by the secret key. Again,
# if this is not the case, abort with a 404
if not is_signed(payload=request.get_data(as_text=True), signature=signature, secret=repo.SECRET):
abort(404)
# make sure the event is set
if event not in GITHUB_EVENTS:
abort(400)
data = request.get_json()
# call the always function and the event function (when implemented)
for function in ["always", event]:
if hasattr(repo, function):
getattr(repo, function)(data)
return "ok"
def is_signed(payload, signature, secret):
"""
https://developer.github.com/webhooks/securing/#validating-payloads-from-github
"""
if six.PY3: # pragma: no cover
payload = payload.encode("utf-8")
secret = secret.encode("utf-8")
digest = "sha1=" + hmac.new(
secret,
msg=payload,
digestmod=hashlib.sha1
).hexdigest()
# constant-time comparison to avoid leaking the digest via timing
# (hmac.compare_digest requires Python 2.7.7+ / 3.3+)
return hmac.compare_digest(digest, signature)
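# Illustration of the scheme (assumption: standalone demo values):
#   body = '{"zen": "Design for failure."}'
#   expected = 'sha1=' + hmac.new(b'sekrit', body.encode(), hashlib.sha1).hexdigest()
#   is_signed(payload=body, signature=expected, secret='sekrit')  # -> True
# GitHub transmits the same value in the X-Hub-Signature request header.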
def import_repo_by_name(name):
module_name = ".".join(["repos", name])
full_path = os.path.join(REPO_DIR, name + ".py")
module = imp.load_source(module_name, full_path)
env_var = "{name}_SECRET".format(name=name.upper())
if env_var not in os.environ:
if DEBUG:
print("WARNING: You need to set the environment variable {env_var}"
" when not in DEBUG mode.".format(
env_var=env_var
))
else:
raise AssertionError(
"You need to set {env_var}".format(
env_var=env_var)
)
else:
setattr(module, "SECRET", os.environ.get(env_var))
return module
def build_routes():
for _, _, filenames in os.walk(REPO_DIR):
for filename in filenames:
if filename.endswith(".py"):
name, _, _ = filename.partition(".py")
app.add_url_rule(
rule="/{}/".format(name),
endpoint=name,
view_func=hook,
methods=["POST"],
defaults={"repo": import_repo_by_name(name)}
)
if __name__ == "__main__": # pragma: no cover
if DEBUG:
print("WARNING: running in DEBUG mode. Incoming webhooks will not be checked for a "
"valid signature.")
build_routes()
app.run(host=HOST, debug=DEBUG)
| mit |
cancro7/gem5 | util/stats/print.py | 90 | 5393 | # Copyright (c) 2003-2004 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
all = False
descriptions = False
class Value:
def __init__(self, value, precision, percent = False):
self.value = value
self.precision = precision
self.percent = percent
def __str__(self):
if isinstance(self.value, str):
if self.value.lower() == 'nan':
value = 'NaN'
elif self.value.lower() == 'inf':
value = 'Inf'
else:
value = self.value
else:
if self.precision >= 0:
format = "%%.%df" % self.precision
elif self.value == 0.0:
format = "%.0f"
elif self.value % 1.0 == 0.0:
format = "%.0f"
else:
format = "%f"
value = self.value
if self.percent:
value = value * 100.0
value = format % value
if self.percent:
value = value + "%"
return value
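# Illustrative sketch (not part of the original file): how Value formats
# numbers. The inputs below are hypothetical.
def _value_example():
    assert str(Value(0.25, 2, percent=True)) == '25.00%'
    assert str(Value(3.0, -1)) == '3'
    assert str(Value('nan', 0)) == 'NaN'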
class Print:
def __init__(self, **vals):
self.__dict__.update(vals)
def __str__(self):
value = Value(self.value, self.precision)
pdf = ''
cdf = ''
if self.__dict__.has_key('pdf'):
pdf = Value(self.pdf, 2, True)
if self.__dict__.has_key('cdf'):
cdf = Value(self.cdf, 2, True)
output = "%-40s %12s %8s %8s" % (self.name, value, pdf, cdf)
if descriptions and self.__dict__.has_key('desc') and self.desc:
output = "%s # %s" % (output, self.desc)
return output
def doprint(self):
if display_all:
return True
if self.value == 0.0 and (self.flags & flags_nozero):
return False
if isinstance(self.value, str):
if self.value == 'NaN' and (self.flags & flags_nonan):
return False
return True
def display(self):
if self.doprint():
print self
class VectorDisplay:
def display(self):
p = Print()
p.flags = self.flags
p.precision = self.precision
if isinstance(self.value, (list, tuple)):
if not len(self.value):
return
mytotal = reduce(lambda x,y: float(x) + float(y), self.value)
mycdf = 0.0
value = self.value
if display_all:
subnames = [ '[%d]' % i for i in range(len(value)) ]
else:
subnames = [''] * len(value)
if self.__dict__.has_key('subnames'):
for i,each in enumerate(self.subnames):
if len(each) > 0:
subnames[i] = '.%s' % each
subdescs = [self.desc]*len(value)
if self.__dict__.has_key('subdescs'):
for i in xrange(min(len(value), len(self.subdescs))):
subdescs[i] = self.subdescs[i]
for val,sname,sdesc in map(None, value, subnames, subdescs):
if mytotal > 0.0:
mypdf = float(val) / float(mytotal)
mycdf += mypdf
if (self.flags & flags_pdf):
p.pdf = mypdf
p.cdf = mycdf
if len(sname) == 0:
continue
p.name = self.name + sname
p.desc = sdesc
p.value = val
p.display()
if (self.flags & flags_total):
if (p.__dict__.has_key('pdf')): del p.__dict__['pdf']
if (p.__dict__.has_key('cdf')): del p.__dict__['cdf']
p.name = self.name + '.total'
p.desc = self.desc
p.value = mytotal
p.display()
else:
p.name = self.name
p.desc = self.desc
p.value = self.value
p.display()
| bsd-3-clause |
bayasist/vbox | src/VBox/Additions/common/crOpenGL/windows_exports.py | 22 | 2387 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil
def GenerateEntrypoints():
apiutil.CopyrightC()
print '#include "chromium.h"'
print '#include "stub.h"'
print ''
print '#define NAKED __declspec(naked)'
print '#define UNUSED(x) ((void)(x))'
print ''
# Get sorted list of dispatched functions.
# The order is very important - it must match cr_opcodes.h
# and spu_dispatch_table.h
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for index in range(len(keys)):
func_name = keys[index]
if apiutil.Category(func_name) == "Chromium":
continue
if apiutil.Category(func_name) == "VBox":
continue
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
print "NAKED %s cr_gl%s( %s )" % (return_type, func_name,
apiutil.MakeDeclarationString( params ))
print "{"
print "\t__asm jmp [glim.%s]" % func_name
for (name, type, vecSize) in params:
print "\tUNUSED( %s );" % name
print "}"
print ""
print '/*'
print '* Aliases'
print '*/'
# Now loop over all the functions and take care of any aliases
allkeys = apiutil.GetAllFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in allkeys:
if "omit" in apiutil.ChromiumProps(func_name):
continue
if func_name in keys:
# we already processed this function earlier
continue
# alias is the function we're aliasing
alias = apiutil.Alias(func_name)
if alias:
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
print "NAKED %s cr_gl%s( %s )" % (return_type, func_name,
apiutil.MakeDeclarationString( params ))
print "{"
print "\t__asm jmp [glim.%s]" % alias
for (name, type, vecSize) in params:
print "\tUNUSED( %s );" % name
print "}"
print ""
print '/*'
print '* No-op stubs'
print '*/'
# Now generate no-op stub functions
for func_name in allkeys:
if "stub" in apiutil.ChromiumProps(func_name):
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
print "NAKED %s cr_gl%s( %s )" % (return_type, func_name, apiutil.MakeDeclarationString(params))
print "{"
if return_type != "void":
print "return (%s) 0" % return_type
print "}"
print ""
GenerateEntrypoints()
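# Illustrative note (not part of the original generator): for a dispatched
# function such as glVertex3f, the loop above emits a naked jump stub along
# these lines (the exact parameter names come from APIspec.txt):
#
#   NAKED void cr_glVertex3f( GLfloat x, GLfloat y, GLfloat z )
#   {
#       __asm jmp [glim.Vertex3f]
#       UNUSED( x );
#       UNUSED( y );
#       UNUSED( z );
#   }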
| gpl-2.0 |
payeldillip/django | django/contrib/auth/tokens.py | 433 | 2803 | from datetime import date
from django.conf import settings
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
hash = salted_hmac(
self.key_salt,
self._make_hash_value(user, timestamp),
).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _make_hash_value(self, user, timestamp):
# Ensure results are consistent across DB backends
login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
return (
six.text_type(user.pk) + user.password +
six.text_type(login_timestamp) + six.text_type(timestamp)
)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
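# Illustrative sketch (not part of Django): a typical round-trip through the
# generator above. Requires a configured Django environment and a saved user
# instance; `user` below is hypothetical.
def _example_token_round_trip(user):
    token = default_token_generator.make_token(user)
    assert default_token_generator.check_token(user, token)
    # Tokens are invalidated by anything that changes the hash input,
    # e.g. a password change or a new last_login timestamp.
    return token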
| bsd-3-clause |
mangaki/mangaki | mangaki/mangaki/tests/test_anidb.py | 1 | 11241 | from datetime import datetime
import os
import responses
from django.conf import settings
from django.test import TestCase
from mangaki.models import Category, Editor, Studio, Work, RelatedWork, Role, Staff, Artist, TaggedWork, Tag
from mangaki.utils.anidb import to_python_datetime, AniDB, diff_between_anidb_and_local_tags
class AniDBTest(TestCase):
@staticmethod
def create_anime(**kwargs):
anime = Category.objects.get(slug='anime')
return Work.objects.create(category=anime, **kwargs)
@staticmethod
def read_fixture(filename):
with open(os.path.join(settings.TEST_DATA_DIR, filename), 'r', encoding='utf-8') as f:
return f.read()
def setUp(self):
self.anidb = AniDB('testclient', 1)
self.no_anidb = AniDB()
self.search_fixture = self.read_fixture('search_sangatsu_no_lion.xml')
def test_to_python_datetime(self):
self.assertEqual(to_python_datetime('2017-12-25'), datetime(2017, 12, 25, 0, 0))
self.assertEqual(to_python_datetime('2017-12'), datetime(2017, 12, 1, 0, 0))
self.assertEqual(to_python_datetime('2017'), datetime(2017, 1, 1, 0, 0))
self.assertRaises(ValueError, to_python_datetime, '2017-25')
def test_missing_client(self):
self.assertRaises(RuntimeError, self.no_anidb._request, 'dummypage')
self.assertFalse(self.no_anidb.is_available)
@responses.activate
def test_anidb_search(self):
responses.add(
responses.GET,
AniDB.SEARCH_URL,
body=self.search_fixture,
status=200,
content_type='application/xml'
)
anime_query = 'sangatsu no lion'
results = self.anidb.search(q=anime_query)
self.assertEqual(len(results), 2)
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_anidb_get_methods(self):
responses.add(
responses.GET,
AniDB.BASE_URL,
body=self.read_fixture('anidb/sangatsu_no_lion.xml'),
status=200,
content_type='application/xml'
)
titles, main_title = self.anidb.get_titles(anidb_aid=11606)
creators, studio = self.anidb.get_creators(anidb_aid=11606)
tags = self.anidb.get_tags(anidb_aid=11606)
related_animes = self.anidb.get_related_animes(anidb_aid=11606)
self.assertEqual(len(titles), 9)
self.assertEqual(main_title, 'Sangatsu no Lion')
self.assertEqual(len(creators), 4)
self.assertEqual(studio.title, 'Shaft')
self.assertEqual(len(tags), 30)
self.assertEqual(len(related_animes), 2)
@responses.activate
def test_anidb_get_animes(self):
# Fake an artist entry with no AniDB creator ID that will be filled by retrieving Sangatsu
        Artist(name="Shinbou Akiyuki").save()
filenames = ['anidb/sangatsu_no_lion.xml', 'anidb/sangatsu_no_lion.xml', 'anidb/hibike_euphonium.xml']
with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
for filename in filenames:
rsps.add(
responses.GET,
AniDB.BASE_URL,
body=self.read_fixture(filename),
status=200,
content_type='application/xml'
)
sangatsu = self.anidb.get_or_update_work(11606)
tags_sangatsu_from_anidb = self.anidb.get_tags(11606)
tags_diff_sangatsu = diff_between_anidb_and_local_tags(sangatsu, tags_sangatsu_from_anidb)
hibike = self.anidb.get_or_update_work(10889)
# Retrieve tags
tags_sangatsu = set(Work.objects.get(pk=sangatsu.pk).taggedwork_set.all().values_list('tag__title', flat=True))
tags_hibike = set(Work.objects.get(pk=hibike.pk).taggedwork_set.all().values_list('tag__title', flat=True))
shared_tags = tags_sangatsu.intersection(tags_hibike)
# Checks on tags
self.assertEqual(len(tags_sangatsu), 30)
self.assertEqual(len(tags_hibike), 38)
self.assertEqual(len(shared_tags), 18)
        # Check Sangatsu's information
self.assertEqual(sangatsu.title, 'Sangatsu no Lion')
self.assertEqual(sangatsu.nb_episodes, 22)
self.assertEqual(sangatsu.studio.title, 'Shaft')
self.assertEqual(sangatsu.date, datetime(2016, 10, 8, 0, 0))
self.assertEqual(sangatsu.end_date, datetime(2017, 3, 18, 0, 0))
# Check for Sangatsu's staff
staff_sangatsu = Work.objects.get(pk=sangatsu.pk).staff_set.all().values_list('artist__name', flat=True)
self.assertCountEqual(staff_sangatsu, ['Umino Chika', 'Hashimoto Yukari', 'Shinbou Akiyuki', 'Okada Kenjirou'])
# Check retrieved tags from AniDB
self.assertEqual(len(tags_diff_sangatsu["deleted_tags"]), 0)
self.assertEqual(len(tags_diff_sangatsu["added_tags"]), 0)
self.assertEqual(len(tags_diff_sangatsu["updated_tags"]), 0)
self.assertEqual(len(tags_diff_sangatsu["kept_tags"]), len(tags_sangatsu))
# Check for no artist duplication
artist = Artist.objects.filter(name="Shinbou Akiyuki")
self.assertEqual(artist.count(), 1)
self.assertEqual(artist.first().anidb_creator_id, 59)
@responses.activate
def test_anidb_duplicate_anime_id(self):
for _ in range(2):
responses.add(
responses.GET,
AniDB.BASE_URL,
body=self.read_fixture('anidb/hibike_euphonium.xml'),
status=200,
content_type='application/xml'
)
self.create_anime(title='Hibike! Euphonium', anidb_aid=10889)
self.create_anime(title='Hibike! Euphonium Duplicate', anidb_aid=10889)
self.anidb.get_or_update_work(10889)
self.assertIs(self.anidb.get_or_update_work(10889), None)
@responses.activate
def test_anidb_nsfw(self):
animes = {}
animes_sources = {
# Not NSFW at all
'anidb/sangatsu_no_lion.xml': (11606, 'Sangatsu no Lion'),
'anidb/hibike_euphonium.xml': (10889, 'Hibike! Euphonium'),
# Totally NSFW (restricted on AniDB)
'anidb/boku_no_piko.xml': (4544, 'Boku no Piko'),
'anidb/bible_black.xml': (528, 'Bible Black'),
# Should be marked NSFW
'anidb/r15.xml': (8396, 'R-15'),
'anidb/astarotte_no_omocha_ex.xml': (8560, 'Astarotte no Omocha! EX'),
'anidb/aki_sora.xml': (6782, 'Aki Sora'),
# Shouldn't be marked NSFW
'anidb/punchline.xml': (10948, 'Punch Line'),
'anidb/panty_stocking.xml': (7529, 'Panty & Stocking with Garterbelt'),
'anidb/shimoneta.xml': (10888, 'Shimoneta to Iu Gainen ga Sonzai Shinai Taikutsu na Sekai')
}
are_nsfw = ['anidb/boku_no_piko.xml', 'anidb/bible_black.xml',
'anidb/r15.xml', 'anidb/astarotte_no_omocha_ex.xml',
'anidb/aki_sora.xml']
are_sfw = ['anidb/sangatsu_no_lion.xml', 'anidb/hibike_euphonium.xml',
'anidb/punchline.xml', 'anidb/panty_stocking.xml',
'anidb/shimoneta.xml']
with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
for filename, _ in animes_sources.items():
rsps.add(
responses.GET,
AniDB.BASE_URL,
body=self.read_fixture(filename),
status=200,
content_type='application/xml'
)
for filename, infos in animes_sources.items():
animes[filename] = self.anidb.get_or_update_work(infos[0])
for filename in are_nsfw:
with self.subTest('Asserting NSFW', anime=animes_sources[filename][1]):
self.assertEqual(animes[filename].title, animes_sources[filename][1])
self.assertTrue(animes[filename].nsfw)
for filename in are_sfw:
with self.subTest('Asserting SFW', anime=animes_sources[filename][1]):
self.assertEqual(animes[filename].title, animes_sources[filename][1])
self.assertFalse(animes[filename].nsfw)
@responses.activate
def test_anidb_related_animes(self):
animes = {}
related_animes = {}
animes_sources = {
'anidb/hibike_euphonium.xml': 10889,
'anidb/hibike_euphonium2.xml': 11746,
'anidb/hibike_euphonium_movie1.xml': 11747,
'anidb/hibike_euphonium_movie2.xml': 12962,
'anidb/hibike_euphonium_original_movies.xml': 13207,
'anidb/sangatsu_no_lion.xml': 11606
}
with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
for filename, _ in animes_sources.items():
for _ in range(2):
rsps.add(
responses.GET,
AniDB.BASE_URL,
body=self.read_fixture(filename),
status=200,
content_type='application/xml'
)
for filename, anidb_aid in animes_sources.items():
animes[filename] = self.anidb.get_or_update_work(anidb_aid)
related_animes[filename] = self.anidb.get_related_animes(anidb_aid=anidb_aid)
            # Already run once inside get_or_update_work, but run again to check
            # that it does not cause errors
for filename in animes_sources:
self.anidb._build_related_animes(animes[filename], related_animes[filename])
relations = RelatedWork.objects.filter(
child_work__anidb_aid__in=animes_sources.values(),
parent_work__anidb_aid__in=animes_sources.values()
)
# Checks that anime are created if missing but not all data is retrieved from AniDB
self.assertEqual(Work.objects.get(title='Sangatsu no Lion meets Bump of Chicken').ext_synopsis, '')
self.assertNotEqual(Work.objects.get(title='Sangatsu no Lion').ext_synopsis, '')
# Checks on relations
self.assertTrue(relations.filter(child_work__anidb_aid=11746, parent_work__anidb_aid=10889, type='sequel').exists())
self.assertTrue(relations.filter(child_work__anidb_aid=10889, parent_work__anidb_aid=11746, type='prequel').exists())
self.assertTrue(relations.filter(child_work__anidb_aid=11747, parent_work__anidb_aid=10889, type='summary').exists())
self.assertTrue(relations.filter(child_work__anidb_aid=10889, parent_work__anidb_aid=11747, type='full_story').exists())
self.assertTrue(relations.filter(child_work__anidb_aid=13207, parent_work__anidb_aid=11746, type='sequel').exists())
self.assertTrue(relations.filter(child_work__anidb_aid=11746, parent_work__anidb_aid=13207, type='prequel').exists())
self.assertTrue(relations.filter(child_work__anidb_aid=12962, parent_work__anidb_aid=11746, type='summary').exists())
self.assertTrue(relations.filter(child_work__anidb_aid=11746, parent_work__anidb_aid=12962, type='full_story').exists())
| agpl-3.0 |
chouseknecht/ansible | lib/ansible/modules/network/cloudvision/cv_server_provision.py | 52 | 24337 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cv_server_provision
version_added: "2.4"
author: "EOS+ CS (ansible-dev@arista.com) (@mharista)"
short_description:
Provision server port by applying or removing template configuration to an
Arista CloudVision Portal configlet that is applied to a switch.
description:
- This module allows a server team to provision server network ports for
new servers without having to access Arista CVP or asking the network team
to do it for them. Provide the information for connecting to CVP, switch
rack, port the new server is connected to, optional vlan, and an action
and the module will apply the configuration to the switch port via CVP.
Actions are add (applies template config to port),
remove (defaults the interface config) and
show (returns the current port config).
options:
host:
description:
- The hostname or IP address of the CVP node being connected to.
required: true
port:
description:
- The port number to use when making API calls to the CVP node. This
will default to the default port for the specified protocol. Port 80
for http and port 443 for https.
protocol:
description:
- The protocol to use when making API calls to CVP. CVP defaults to https
and newer versions of CVP no longer support http.
default: https
choices: [https, http]
username:
description:
- The user that will be used to connect to CVP for making API calls.
required: true
password:
description:
- The password of the user that will be used to connect to CVP for API
calls.
required: true
server_name:
description:
      - The hostname or identifier for the server whose switch port is
        being provisioned.
required: true
switch_name:
description:
      - The hostname of the switch that is being configured for the server
        being provisioned.
required: true
switch_port:
description:
- The physical port number on the switch that the new server is
connected to.
required: true
port_vlan:
description:
- The vlan that should be applied to the port for this server.
This parameter is dependent on a proper template that supports single
vlan provisioning with it. If a port vlan is specified by the template
specified does not support this the module will exit out with no
changes. If a template is specified that requires a port vlan but no
port vlan is specified the module will exit out with no changes.
template:
description:
- A path to a Jinja formatted template file that contains the
configuration block that will be applied to the specified switch port.
This template will have variable fields replaced by the module before
being applied to the switch configuration.
required: true
action:
description:
- The action for the module to take. The actions are add, which applies
the specified template config to port, remove, which defaults the
specified interface configuration, and show, which will return the
current port configuration with no changes.
default: show
choices: [show, add, remove]
auto_run:
description:
- Flag that determines whether or not the module will execute the CVP
task spawned as a result of changes to a switch configlet. When an
add or remove action is taken which results in a change to a switch
configlet, CVP will spawn a task that needs to be executed for the
configuration to be applied to the switch. If this option is True then
        the module will determine the task number created by the configuration
change, execute it and wait for the task to complete. If the option
is False then the task will remain in the Pending state in CVP for
a network administrator to review and execute.
type: bool
default: 'no'
requirements: [Jinja2, cvprac >= 0.7.0]
'''
EXAMPLES = '''
- name: Get current configuration for interface Ethernet2
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: template_file.j2
action: show
- name: Remove existing configuration from interface Ethernet2. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: template_file.j2
action: remove
auto_run: True
- name: Add template configuration to interface Ethernet2. No VLAN. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
template: single_attached_trunk.j2
action: add
auto_run: True
- name: Add template with VLAN configuration to interface Ethernet2. Run task.
cv_server_provision:
host: cvp_node
username: cvp_user
password: cvp_pass
protocol: https
server_name: new_server
switch_name: eos_switch_1
switch_port: 2
port_vlan: 22
template: single_attached_vlan.j2
action: add
auto_run: True
'''
RETURN = '''
changed:
description: Signifies if a change was made to the configlet
returned: success
type: bool
sample: true
currentConfigBlock:
description: The current config block for the user specified interface
returned: when action = show
type: str
sample: |
interface Ethernet4
!
newConfigBlock:
description: The new config block for the user specified interface
returned: when action = add or remove
type: str
sample: |
interface Ethernet3
description example
no switchport
!
oldConfigBlock:
description: The current config block for the user specified interface
before any changes are made
returned: when action = add or remove
type: str
sample: |
interface Ethernet3
!
fullConfig:
description: The full config of the configlet after being updated
returned: when action = add or remove
type: str
sample: |
!
interface Ethernet3
!
interface Ethernet4
!
updateConfigletResponse:
description: Response returned from CVP when configlet update is triggered
returned: when action = add or remove and configuration changes
type: str
sample: "Configlet veos1-server successfully updated and task initiated."
portConfigurable:
description: Signifies if the user specified port has an entry in the
configlet that Ansible has access to
returned: success
type: bool
sample: true
switchConfigurable:
description: Signifies if the user specified switch has a configlet
applied to it that CVP is allowed to edit
returned: success
type: bool
sample: true
switchInfo:
description: Information from CVP describing the switch being configured
returned: success
type: dict
sample: {"architecture": "i386",
"bootupTimeStamp": 1491264298.21,
"complianceCode": "0000",
"complianceIndication": "NONE",
"deviceInfo": "Registered",
"deviceStatus": "Registered",
"fqdn": "veos1",
"hardwareRevision": "",
"internalBuildId": "12-12",
"internalVersion": "4.17.1F-11111.4171F",
"ipAddress": "192.168.1.20",
"isDANZEnabled": "no",
"isMLAGEnabled": "no",
"key": "00:50:56:5d:e5:e0",
"lastSyncUp": 1496432895799,
"memFree": 472976,
"memTotal": 1893460,
"modelName": "vEOS",
"parentContainerId": "container_13_5776759195930",
"serialNumber": "",
"systemMacAddress": "00:50:56:5d:e5:e0",
"taskIdList": [],
"tempAction": null,
"type": "netelement",
"unAuthorized": false,
"version": "4.17.1F",
"ztpMode": "false"}
taskCompleted:
description: Signifies if the task created and executed has completed successfully
returned: when action = add or remove, and auto_run = true,
and configuration changes
type: bool
sample: true
taskCreated:
description: Signifies if a task was created due to configlet changes
returned: when action = add or remove, and auto_run = true or false,
and configuration changes
type: bool
sample: true
taskExecuted:
description: Signifies if the automation executed the spawned task
returned: when action = add or remove, and auto_run = true,
and configuration changes
type: bool
sample: true
taskId:
description: The task ID created by CVP because of changes to configlet
returned: when action = add or remove, and auto_run = true or false,
and configuration changes
type: str
sample: "500"
'''
import re
import time
from ansible.module_utils.basic import AnsibleModule
try:
import jinja2
from jinja2 import meta
HAS_JINJA2 = True
except ImportError:
HAS_JINJA2 = False
try:
from cvprac.cvp_client import CvpClient
from cvprac.cvp_client_errors import CvpLoginError, CvpApiError
HAS_CVPRAC = True
except ImportError:
HAS_CVPRAC = False
def connect(module):
''' Connects to CVP device using user provided credentials from playbook.
:param module: Ansible module with parameters and client connection.
:return: CvpClient object with connection instantiated.
'''
client = CvpClient()
try:
client.connect([module.params['host']],
module.params['username'],
module.params['password'],
protocol=module.params['protocol'],
port=module.params['port'])
except CvpLoginError as e:
module.fail_json(msg=str(e))
return client
def switch_info(module):
''' Get dictionary of switch info from CVP.
:param module: Ansible module with parameters and client connection.
:return: Dict of switch info from CVP or exit with failure if no
info for device is found.
'''
switch_name = module.params['switch_name']
switch_info = module.client.api.get_device_by_name(switch_name)
if not switch_info:
module.fail_json(msg=str("Device with name '%s' does not exist."
% switch_name))
return switch_info
def switch_in_compliance(module, sw_info):
''' Check if switch is currently in compliance.
:param module: Ansible module with parameters and client connection.
:param sw_info: Dict of switch info.
:return: Nothing or exit with failure if device is not in compliance.
'''
compliance = module.client.api.check_compliance(sw_info['key'],
sw_info['type'])
if compliance['complianceCode'] != '0000':
module.fail_json(msg=str('Switch %s is not in compliance. Returned'
' compliance code %s.'
% (sw_info['fqdn'],
compliance['complianceCode'])))
def server_configurable_configlet(module, sw_info):
''' Check CVP that the user specified switch has a configlet assigned to
it that Ansible is allowed to edit.
:param module: Ansible module with parameters and client connection.
:param sw_info: Dict of switch info.
:return: Dict of configlet information or None.
'''
configurable_configlet = None
configlet_name = module.params['switch_name'] + '-server'
switch_configlets = module.client.api.get_configlets_by_device_id(
sw_info['key'])
for configlet in switch_configlets:
if configlet['name'] == configlet_name:
configurable_configlet = configlet
return configurable_configlet
def port_configurable(module, configlet):
''' Check configlet if the user specified port has a configuration entry
in the configlet to determine if Ansible is allowed to configure the
port on this switch.
:param module: Ansible module with parameters and client connection.
:param configlet: Dict of configlet info.
:return: True or False.
'''
configurable = False
regex = r'^interface Ethernet%s' % module.params['switch_port']
for config_line in configlet['config'].split('\n'):
if re.match(regex, config_line):
configurable = True
return configurable
def configlet_action(module, configlet):
''' Take appropriate action based on current state of device and user
requested action.
Return current config block for specified port if action is show.
If action is add or remove make the appropriate changes to the
configlet and return the associated information.
:param module: Ansible module with parameters and client connection.
:param configlet: Dict of configlet info.
:return: Dict of information to updated results with.
'''
result = dict()
existing_config = current_config(module, configlet['config'])
if module.params['action'] == 'show':
result['currentConfigBlock'] = existing_config
return result
elif module.params['action'] == 'add':
result['newConfigBlock'] = config_from_template(module)
elif module.params['action'] == 'remove':
result['newConfigBlock'] = ('interface Ethernet%s\n!'
% module.params['switch_port'])
result['oldConfigBlock'] = existing_config
result['fullConfig'] = updated_configlet_content(module,
configlet['config'],
result['newConfigBlock'])
resp = module.client.api.update_configlet(result['fullConfig'],
configlet['key'],
configlet['name'])
if 'data' in resp:
result['updateConfigletResponse'] = resp['data']
if 'task' in resp['data']:
result['changed'] = True
result['taskCreated'] = True
return result
def current_config(module, config):
''' Parse the full port configuration for the user specified port out of
the full configlet configuration and return as a string.
:param module: Ansible module with parameters and client connection.
:param config: Full config to parse specific port config from.
:return: String of current config block for user specified port.
'''
regex = r'^interface Ethernet%s' % module.params['switch_port']
match = re.search(regex, config, re.M)
if not match:
module.fail_json(msg=str('interface section not found - %s'
% config))
block_start, line_end = match.regs[0]
match = re.search(r'!', config[line_end:], re.M)
if not match:
return config[block_start:]
_, block_end = match.regs[0]
block_end = line_end + block_end
return config[block_start:block_end]
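# Illustrative sketch (not part of the original module): the block extraction
# performed by current_config() on a sample configlet. The configuration text
# below is hypothetical.
def _example_interface_block():
    config = ('interface Ethernet2\n'
              '   description server1\n'
              '!\n'
              'interface Ethernet3\n'
              '!')
    match = re.search(r'^interface Ethernet2', config, re.M)
    block_start, line_end = match.regs[0]
    end = re.search(r'!', config[line_end:], re.M)
    _, block_end = end.regs[0]
    # Yields 'interface Ethernet2\n   description server1\n!'
    return config[block_start:line_end + block_end]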
def valid_template(port, template):
''' Test if the user provided Jinja template is valid.
:param port: User specified port.
:param template: Contents of Jinja template.
:return: True or False
'''
valid = True
regex = r'^interface Ethernet%s' % port
match = re.match(regex, template, re.M)
if not match:
valid = False
return valid
def config_from_template(module):
''' Load the Jinja template and apply user provided parameters in necessary
places. Fail if template is not found. Fail if rendered template does
not reference the correct port. Fail if the template requires a VLAN
but the user did not provide one with the port_vlan parameter.
:param module: Ansible module with parameters and client connection.
:return: String of Jinja template rendered with parameters or exit with
failure.
'''
template_loader = jinja2.FileSystemLoader('./templates')
env = jinja2.Environment(loader=template_loader,
undefined=jinja2.DebugUndefined)
template = env.get_template(module.params['template'])
if not template:
module.fail_json(msg=str('Could not find template - %s'
% module.params['template']))
data = {'switch_port': module.params['switch_port'],
'server_name': module.params['server_name']}
temp_source = env.loader.get_source(env, module.params['template'])[0]
parsed_content = env.parse(temp_source)
temp_vars = list(meta.find_undeclared_variables(parsed_content))
if 'port_vlan' in temp_vars:
if module.params['port_vlan']:
data['port_vlan'] = module.params['port_vlan']
else:
module.fail_json(msg=str('Template %s requires a vlan. Please'
' re-run with vlan number provided.'
% module.params['template']))
template = template.render(data)
if not valid_template(module.params['switch_port'], template):
module.fail_json(msg=str('Template content does not configure proper'
' interface - %s' % template))
return template
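# Illustrative sketch (not part of the original module): detecting which
# variables a Jinja template expects, the same way config_from_template()
# decides whether port_vlan is required. The template string is hypothetical.
def _example_template_variables():
    env = jinja2.Environment(undefined=jinja2.DebugUndefined)
    source = ('interface Ethernet{{ switch_port }}\n'
              '   switchport access vlan {{ port_vlan }}\n'
              '!')
    parsed = env.parse(source)
    # Returns {'switch_port', 'port_vlan'}
    return meta.find_undeclared_variables(parsed)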
def updated_configlet_content(module, existing_config, new_config):
''' Update the configlet configuration with the new section for the port
specified by the user.
:param module: Ansible module with parameters and client connection.
:param existing_config: String of current configlet configuration.
:param new_config: String of configuration for user specified port to
replace in the existing config.
:return: String of the full updated configuration.
'''
regex = r'^interface Ethernet%s' % module.params['switch_port']
match = re.search(regex, existing_config, re.M)
if not match:
module.fail_json(msg=str('interface section not found - %s'
% existing_config))
block_start, line_end = match.regs[0]
updated_config = existing_config[:block_start] + new_config
match = re.search(r'!\n', existing_config[line_end:], re.M)
if match:
_, block_end = match.regs[0]
block_end = line_end + block_end
updated_config += '\n%s' % existing_config[block_end:]
return updated_config
def configlet_update_task(module):
''' Poll device info of switch from CVP up to three times to see if the
configlet updates have spawned a task. It sometimes takes a second for
the task to be spawned after configlet updates. If a task is found
return the task ID. Otherwise return None.
:param module: Ansible module with parameters and client connection.
:return: Task ID or None.
'''
for num in range(3):
device_info = switch_info(module)
if (('taskIdList' in device_info) and
(len(device_info['taskIdList']) > 0)):
for task in device_info['taskIdList']:
if ('Configlet Assign' in task['description'] and
task['data']['WORKFLOW_ACTION'] == 'Configlet Push'):
return task['workOrderId']
time.sleep(1)
return None
def wait_for_task_completion(module, task):
''' Poll CVP for the executed task to complete. There is currently no
timeout. Exits with failure if task status is Failed or Cancelled.
:param module: Ansible module with parameters and client connection.
:param task: Task ID to poll for completion.
:return: True or exit with failure if task is cancelled or fails.
'''
task_complete = False
while not task_complete:
task_info = module.client.api.get_task_by_id(task)
task_status = task_info['workOrderUserDefinedStatus']
if task_status == 'Completed':
return True
elif task_status in ['Failed', 'Cancelled']:
module.fail_json(msg=str('Task %s has reported status %s. Please'
' consult the CVP admins for more'
' information.' % (task, task_status)))
time.sleep(2)
def main():
""" main entry point for module execution
"""
argument_spec = dict(
host=dict(required=True),
port=dict(required=False, default=None),
protocol=dict(default='https', choices=['http', 'https']),
username=dict(required=True),
password=dict(required=True, no_log=True),
server_name=dict(required=True),
switch_name=dict(required=True),
switch_port=dict(required=True),
port_vlan=dict(required=False, default=None),
        template=dict(required=True),
action=dict(default='show', choices=['show', 'add', 'remove']),
auto_run=dict(type='bool', default=False))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False)
if not HAS_JINJA2:
module.fail_json(msg='The Jinja2 python module is required.')
if not HAS_CVPRAC:
module.fail_json(msg='The cvprac python module is required.')
result = dict(changed=False)
module.client = connect(module)
try:
result['switchInfo'] = switch_info(module)
if module.params['action'] in ['add', 'remove']:
switch_in_compliance(module, result['switchInfo'])
switch_configlet = server_configurable_configlet(module,
result['switchInfo'])
if not switch_configlet:
module.fail_json(msg=str('Switch %s has no configurable server'
' ports.' % module.params['switch_name']))
result['switchConfigurable'] = True
if not port_configurable(module, switch_configlet):
module.fail_json(msg=str('Port %s is not configurable as a server'
' port on switch %s.'
% (module.params['switch_port'],
module.params['switch_name'])))
result['portConfigurable'] = True
result['taskCreated'] = False
result['taskExecuted'] = False
result['taskCompleted'] = False
result.update(configlet_action(module, switch_configlet))
if module.params['auto_run'] and module.params['action'] != 'show':
task_id = configlet_update_task(module)
if task_id:
result['taskId'] = task_id
note = ('Update config on %s with %s action from Ansible.'
% (module.params['switch_name'],
module.params['action']))
module.client.api.add_note_to_task(task_id, note)
module.client.api.execute_task(task_id)
result['taskExecuted'] = True
task_completed = wait_for_task_completion(module, task_id)
if task_completed:
result['taskCompleted'] = True
else:
result['taskCreated'] = False
except CvpApiError as e:
module.fail_json(msg=str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
ronniehd/repository.ronniehd | program.plexus/resources/plexus/plexusutils/directoryhandle.py | 25 | 2478 | # -*- coding: utf-8 -*-
""" Plexus (c) 2015 enen92
This file contains the functions for xbmc addon directory handle
Functions:
addLink(name,url,iconimage,fan_art="%s/fanart.jpg"%settings.getAddonInfo("path")) -> Addlink function used in the 'whole' addon
addDir(name,url,mode,iconimage,total,pasta,fan_art="%s/fanart.jpg"%settings.getAddonInfo("path"),parser=None,parserfunction=None) -> AddDir function used in the whole addon
"""
import xbmc
import xbmcgui
import xbmcvfs
import xbmcplugin
import os
import urllib
import sys
import hashlib
from pluginxbmc import *
"""
Common addDir functions for main addon
"""
def addLink(name,url,iconimage,fan_art="%s/fanart.jpg"%settings.getAddonInfo("path")):
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('fanart_image', fan_art)
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
def addDir(name,url,mode,iconimage,total,pasta,fan_art="%s/fanart.jpg"%settings.getAddonInfo("path"),parser=None,parserfunction=None):
if "plugin://" in sys.argv[0]: u = sys.argv[0]; sysargv = sys.argv[0]
else: u = 'plugin://plugin.video.p2p-streams/'; sysargv = 'plugin://plugin.video.p2p-streams/'
u += "?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)
try: u += "&parser="+urllib.quote_plus(parser)
except: pass
try: u += "&parserfunction="+urllib.quote_plus(parserfunction)
except: pass
contextmen = []
liz=xbmcgui.ListItem(name,iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name} )
liz.setProperty('fanart_image', fan_art)
if mode == 1 or mode == 2:
fic = hashlib.md5(name + '|' + url).hexdigest() + '.txt'
if os.path.exists(os.path.join(mystrm_folder,fic)):
contextmen.append((translate(30025), 'XBMC.RunPlugin(%s?mode=13&url=%s&name=%s&iconimage=%s)' % (sysargv, urllib.quote_plus(url),name,iconimage)))
else:
contextmen.append((translate(30026), 'XBMC.RunPlugin(%s?mode=12&url=%s&name=%s&iconimage=%s)' % (sysargv,urllib.quote_plus(url),name,iconimage)))
liz.addContextMenuItems(contextmen,replaceItems=False)
return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=pasta,totalItems=total)
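# Illustrative sketch (not part of the original file): a typical directory
# listing built with the helpers above. The url, mode and artwork values are
# hypothetical and only resolve inside a running Kodi plugin.
def _example_listing():
    addDir(name='Live channels',
           url='http://example.com/channels',
           mode=1,
           iconimage='icon.png',
           total=1,
           pasta=True)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))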
| gpl-3.0 |
jfpla/odoo | openerp/osv/query.py | 380 | 7513 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def _quote(to_quote):
if '"' not in to_quote:
return '"%s"' % to_quote
return to_quote
class Query(object):
"""
Dumb implementation of a Query object, using 3 string lists so far
for backwards compatibility with the (table, where_clause, where_params) previously used.
TODO: To be improved after v6.0 to rewrite part of the ORM and add support for:
- auto-generated multiple table aliases
- multiple joins to the same table with different conditions
- dynamic right-hand-side values in domains (e.g. a.name = a.description)
- etc.
"""
def __init__(self, tables=None, where_clause=None, where_clause_params=None, joins=None):
# holds the list of tables joined using default JOIN.
# the table names are stored double-quoted (backwards compatibility)
self.tables = tables or []
# holds the list of WHERE clause elements, to be joined with
# 'AND' when generating the final query
self.where_clause = where_clause or []
# holds the parameters for the formatting of `where_clause`, to be
# passed to psycopg's execute method.
self.where_clause_params = where_clause_params or []
# holds table joins done explicitly, supporting outer joins. The JOIN
# condition should not be in `where_clause`. The dict is used as follows:
# self.joins = {
# 'table_a': [
# ('table_b', 'table_a_col1', 'table_b_col', 'LEFT JOIN'),
# ('table_c', 'table_a_col2', 'table_c_col', 'LEFT JOIN'),
# ('table_d', 'table_a_col3', 'table_d_col', 'JOIN'),
# ]
# }
# which should lead to the following SQL:
# SELECT ... FROM "table_a" LEFT JOIN "table_b" ON ("table_a"."table_a_col1" = "table_b"."table_b_col")
# LEFT JOIN "table_c" ON ("table_a"."table_a_col2" = "table_c"."table_c_col")
self.joins = joins or {}
def _get_table_aliases(self):
from openerp.osv.expression import get_alias_from_query
return [get_alias_from_query(from_statement)[1] for from_statement in self.tables]
def _get_alias_mapping(self):
from openerp.osv.expression import get_alias_from_query
mapping = {}
for table in self.tables:
alias, statement = get_alias_from_query(table)
mapping[statement] = table
return mapping
def add_join(self, connection, implicit=True, outer=False):
""" Join a destination table to the current table.
        :param implicit: False if the join is an explicit join. This allows
            falling back on the previous implementation of ``join`` before
            OpenERP 7.0. It therefore adds the JOIN specified in ``connection``.
            If True, the join is done implicitly, by adding the table alias
            in the from clause and the join condition in the where clause
            of the query. Implicit joins do not handle the outer parameter.
:param connection: a tuple ``(lhs, table, lhs_col, col, link)``.
The join corresponds to the SQL equivalent of::
(lhs.lhs_col = table.col)
Note that all connection elements are strings. Please refer to expression.py for more details about joins.
:param outer: True if a LEFT OUTER JOIN should be used, if possible
(no promotion to OUTER JOIN is supported in case the JOIN
was already present in the query, as for the moment
implicit INNER JOINs are only connected from NON-NULL
columns so it would not be correct (e.g. for
``_inherits`` or when a domain criterion explicitly
adds filtering)
"""
from openerp.osv.expression import generate_table_alias
(lhs, table, lhs_col, col, link) = connection
alias, alias_statement = generate_table_alias(lhs, [(table, link)])
if implicit:
if alias_statement not in self.tables:
self.tables.append(alias_statement)
condition = '("%s"."%s" = "%s"."%s")' % (lhs, lhs_col, alias, col)
self.where_clause.append(condition)
else:
# already joined
pass
return alias, alias_statement
else:
aliases = self._get_table_aliases()
assert lhs in aliases, "Left-hand-side table %s must already be part of the query tables %s!" % (lhs, str(self.tables))
if alias_statement in self.tables:
# already joined, must ignore (promotion to outer and multiple joins not supported yet)
pass
else:
# add JOIN
self.tables.append(alias_statement)
self.joins.setdefault(lhs, []).append((alias, lhs_col, col, outer and 'LEFT JOIN' or 'JOIN'))
return alias, alias_statement
def get_sql(self):
""" Returns (query_from, query_where, query_params). """
from openerp.osv.expression import get_alias_from_query
query_from = ''
tables_to_process = list(self.tables)
alias_mapping = self._get_alias_mapping()
def add_joins_for_table(table, query_from):
for (dest_table, lhs_col, col, join) in self.joins.get(table, []):
tables_to_process.remove(alias_mapping[dest_table])
query_from += ' %s %s ON ("%s"."%s" = "%s"."%s")' % \
(join, alias_mapping[dest_table], table, lhs_col, dest_table, col)
query_from = add_joins_for_table(dest_table, query_from)
return query_from
for table in tables_to_process:
query_from += table
table_alias = get_alias_from_query(table)[1]
if table_alias in self.joins:
query_from = add_joins_for_table(table_alias, query_from)
query_from += ','
query_from = query_from[:-1] # drop last comma
return query_from, " AND ".join(self.where_clause), self.where_clause_params
def __str__(self):
return '<osv.Query: "SELECT ... FROM %s WHERE %s" with params: %r>' % self.get_sql()
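# Illustrative sketch (not part of the original file): building a simple query
# with an implicit join and rendering it. Table and column names below are
# hypothetical.
def _example_query():
    query = Query(tables=['"res_partner"'],
                  where_clause=['"res_partner"."active" = %s'],
                  where_clause_params=[True])
    # connection tuple: (lhs, table, lhs_col, col, link)
    query.add_join(('res_partner', 'res_company', 'company_id', 'id', 'company_id'),
                   implicit=True)
    # get_sql() returns (query_from, query_where, query_params)
    return query.get_sql()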
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tvibliani/odoo | openerp/addons/base/ir/ir_ui_view.py | 22 | 52474 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import collections
import copy
import datetime
import dateutil
from dateutil.relativedelta import relativedelta
import fnmatch
import logging
import os
import time
from operator import itemgetter
import simplejson
import werkzeug
import HTMLParser
from lxml import etree
import openerp
from openerp import tools, api
from openerp.http import request
from openerp.osv import fields, osv, orm
from openerp.tools import graph, SKIPPED_ELEMENT_TYPES, SKIPPED_ELEMENTS
from openerp.tools.parse_version import parse_version
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.view_validation import valid_view
from openerp.tools import misc
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MOVABLE_BRANDING = ['data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-xpath', 'data-oe-source-id']
def keep_query(*keep_params, **additional_params):
"""
Generate a query string keeping the current request querystring's parameters specified
in ``keep_params`` and also adds the parameters specified in ``additional_params``.
    Multi-value query string parameters will be merged into a single one with
    comma-separated values.
    The ``keep_params`` arguments can use wildcards too, e.g.:
keep_query('search', 'shop_*', page=4)
"""
if not keep_params and not additional_params:
keep_params = ('*',)
params = additional_params.copy()
qs_keys = request.httprequest.args.keys()
for keep_param in keep_params:
for param in fnmatch.filter(qs_keys, keep_param):
if param not in additional_params and param in qs_keys:
params[param] = request.httprequest.args.getlist(param)
return werkzeug.urls.url_encode(params)
class view_custom(osv.osv):
_name = 'ir.ui.view.custom'
_order = 'create_date desc' # search(limit=1) should return the last customization
_columns = {
'ref_id': fields.many2one('ir.ui.view', 'Original View', select=True, required=True, ondelete='cascade'),
'user_id': fields.many2one('res.users', 'User', select=True, required=True, ondelete='cascade'),
'arch': fields.text('View Architecture', required=True),
}
def name_get(self, cr, uid, ids, context=None):
return [(rec.id, rec.user_id.name) for rec in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if name:
ids = self.search(cr, user, [('user_id', operator, name)] + args, limit=limit)
return self.name_get(cr, user, ids, context=context)
return super(view_custom, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
def _auto_init(self, cr, context=None):
res = super(view_custom, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_ui_view_custom_user_id_ref_id\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_ui_view_custom_user_id_ref_id ON ir_ui_view_custom (user_id, ref_id)')
return res
def _hasclass(context, *cls):
""" Checks if the context node has all the classes passed as arguments
"""
node_classes = set(context.context_node.attrib.get('class', '').split())
return node_classes.issuperset(cls)
xpath_utils = etree.FunctionNamespace(None)
xpath_utils['hasclass'] = _hasclass
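# Illustrative sketch (not part of the original file): once registered in the
# default function namespace above, hasclass() is usable from any xpath
# expression evaluated through lxml. The markup below is hypothetical.
def _example_hasclass():
    doc = etree.fromstring('<form><div class="oe_title mb8"/></form>')
    return doc.xpath('//div[hasclass("oe_title")]')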
class view(osv.osv):
_name = 'ir.ui.view'
_parent_name = 'inherit_id' # used for recursion check
def _get_model_data(self, cr, uid, ids, fname, args, context=None):
result = dict.fromkeys(ids, False)
IMD = self.pool['ir.model.data']
data_ids = IMD.search_read(cr, uid, [('res_id', 'in', ids), ('model', '=', 'ir.ui.view')], ['res_id'], context=context)
result.update(map(itemgetter('res_id', 'id'), data_ids))
return result
_columns = {
'name': fields.char('View Name', required=True),
'model': fields.char('Object', select=True),
'priority': fields.integer('Sequence', required=True),
'type': fields.selection([
('tree','Tree'),
('form','Form'),
('graph', 'Graph'),
('calendar', 'Calendar'),
('diagram','Diagram'),
('gantt', 'Gantt'),
('kanban', 'Kanban'),
('search','Search'),
('qweb', 'QWeb')], string='View Type'),
'arch': fields.text('View Architecture', required=True),
'inherit_id': fields.many2one('ir.ui.view', 'Inherited View', ondelete='restrict', select=True),
'inherit_children_ids': fields.one2many('ir.ui.view','inherit_id', 'Inherit Views'),
'field_parent': fields.char('Child Field'),
'model_data_id': fields.function(_get_model_data, type='many2one', relation='ir.model.data', string="Model Data", store=True),
'xml_id': fields.function(osv.osv.get_xml_id, type='char', size=128, string="External ID",
help="ID of the view defined in xml file"),
'groups_id': fields.many2many('res.groups', 'ir_ui_view_group_rel', 'view_id', 'group_id',
string='Groups', help="If this field is empty, the view applies to all users. Otherwise, the view applies to the users of those groups only."),
'model_ids': fields.one2many('ir.model.data', 'res_id', domain=[('model','=','ir.ui.view')], auto_join=True),
'create_date': fields.datetime('Create Date', readonly=True),
'write_date': fields.datetime('Last Modification Date', readonly=True),
'mode': fields.selection(
[('primary', "Base view"), ('extension', "Extension View")],
string="View inheritance mode", required=True,
help="""Only applies if this view inherits from an other one (inherit_id is not False/Null).
* if extension (default), if this view is requested the closest primary view
is looked up (via inherit_id), then all views inheriting from it with this
view's model are applied
* if primary, the closest primary view is fully resolved (even if it uses a
different model than this one), then this view's inheritance specs
(<xpath/>) are applied, and the result is used as if it were this view's
actual arch.
"""),
'active': fields.boolean("Active",
help="""If this view is inherited,
* if True, the view always extends its parent
* if False, the view currently does not extend its parent but can be enabled
"""),
}
_defaults = {
'mode': 'primary',
'active': True,
'priority': 16,
}
_order = "priority,name"
# Holds the RNG schema
_relaxng_validator = None
def _relaxng(self):
if not self._relaxng_validator:
frng = tools.file_open(os.path.join('base','rng','view.rng'))
try:
relaxng_doc = etree.parse(frng)
self._relaxng_validator = etree.RelaxNG(relaxng_doc)
except Exception:
_logger.exception('Failed to load RelaxNG XML schema for views validation')
finally:
frng.close()
return self._relaxng_validator
def _check_xml(self, cr, uid, ids, context=None):
# As all constraints are verified on create/write, we must re-check that there is no
# recursion before calling `read_combined` to avoid an infinite loop.
if not self._check_recursion(cr, uid, ids, context=context):
return True # pretend arch is valid to avoid misleading user about the error.
if context is None:
context = {}
context = dict(context, check_view_ids=ids)
# Sanity checks: the view should not break anything upon rendering!
# Any exception raised below will cause a transaction rollback.
for view in self.browse(cr, uid, ids, context):
view_def = self.read_combined(cr, uid, view.id, ['arch'], context=context)
view_arch_utf8 = view_def['arch']
if view.type != 'qweb':
view_doc = etree.fromstring(view_arch_utf8)
# verify that all fields used are valid, etc.
self.postprocess_and_fields(cr, uid, view.model, view_doc, view.id, context=context)
# RNG-based validation is not possible anymore with 7.0 forms
view_docs = [view_doc]
if view_docs[0].tag == 'data':
# A <data> element is a wrapper for multiple root nodes
view_docs = view_docs[0]
validator = self._relaxng()
for view_arch in view_docs:
version = view_arch.get('version', '7.0')
if parse_version(version) < parse_version('7.0') and validator and not validator.validate(view_arch):
for error in validator.error_log:
_logger.error(tools.ustr(error))
return False
if not valid_view(view_arch):
return False
return True
_sql_constraints = [
('inheritance_mode',
"CHECK (mode != 'extension' OR inherit_id IS NOT NULL)",
"Invalid inheritance mode: if the mode is 'extension', the view must"
" extend an other view"),
]
_constraints = [
(_check_xml, 'Invalid view definition', ['arch']),
(osv.osv._check_recursion, 'You cannot create recursive inherited views.', ['inherit_id']),
]
def _auto_init(self, cr, context=None):
res = super(view, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_ui_view_model_type_inherit_id\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_ui_view_model_type_inherit_id ON ir_ui_view (model, inherit_id)')
return res
def _compute_defaults(self, cr, uid, values, context=None):
if 'inherit_id' in values:
values.setdefault(
'mode', 'extension' if values['inherit_id'] else 'primary')
return values
def create(self, cr, uid, values, context=None):
if not values.get('type'):
if values.get('inherit_id'):
values['type'] = self.browse(cr, uid, values['inherit_id'], context).type
else:
values['type'] = etree.fromstring(values['arch']).tag
if not values.get('name'):
values['name'] = "%s %s" % (values.get('model'), values['type'])
self.clear_cache()
return super(view, self).create(
cr, uid,
self._compute_defaults(cr, uid, values, context=context),
context=context)
def write(self, cr, uid, ids, vals, context=None):
if not isinstance(ids, (list, tuple)):
ids = [ids]
if context is None:
context = {}
# drop the corresponding view customizations (used for dashboards for example), otherwise
# not all users would see the updated views
custom_view_ids = self.pool.get('ir.ui.view.custom').search(cr, uid, [('ref_id', 'in', ids)])
if custom_view_ids:
self.pool.get('ir.ui.view.custom').unlink(cr, uid, custom_view_ids)
self.clear_cache()
ret = super(view, self).write(
cr, uid, ids,
self._compute_defaults(cr, uid, vals, context=context),
context)
return ret
def toggle(self, cr, uid, ids, context=None):
""" Switches between enabled and disabled statuses
"""
for view in self.browse(cr, uid, ids, context=dict(context or {}, active_test=False)):
view.write({'active': not view.active})
# default view selection
def default_view(self, cr, uid, model, view_type, context=None):
""" Fetches the default view for the provided (model, view_type) pair:
primary view with the lowest priority.
:param str model:
        :param str view_type:
        :return: id of the default view or False if none found
:rtype: int
"""
domain = [
['model', '=', model],
['type', '=', view_type],
['mode', '=', 'primary'],
]
ids = self.search(cr, uid, domain, limit=1, context=context)
if not ids:
return False
return ids[0]
#------------------------------------------------------
    # Inheritance mechanism
#------------------------------------------------------
def get_inheriting_views_arch(self, cr, uid, view_id, model, context=None):
"""Retrieves the architecture of views that inherit from the given view, from the sets of
views that should currently be used in the system. During the module upgrade phase it
may happen that a view is present in the database but the fields it relies on are not
fully loaded yet. This method only considers views that belong to modules whose code
is already loaded. Custom views defined directly in the database are loaded only
after the module initialization phase is completely finished.
:param int view_id: id of the view whose inheriting views should be retrieved
:param str model: model identifier of the inheriting views.
:rtype: list of tuples
:return: [(view_arch,view_id), ...]
"""
if not context:
context = {}
user = self.pool['res.users'].browse(cr, 1, uid, context=context)
user_groups = frozenset(user.groups_id or ())
conditions = [
['inherit_id', '=', view_id],
['model', '=', model],
['mode', '=', 'extension'],
['active', '=', True],
]
if self.pool._init and not context.get('load_all_views'):
# Module init currently in progress, only consider views from
# modules whose code is already loaded
conditions.extend([
'|',
['model_ids.module', 'in', tuple(self.pool._init_modules)],
['id', 'in', context.get('check_view_ids') or (0,)],
])
view_ids = self.search(cr, uid, conditions, context=context)
return [(view.arch, view.id)
for view in self.browse(cr, 1, view_ids, context)
if not (view.groups_id and user_groups.isdisjoint(view.groups_id))]
def raise_view_error(self, cr, uid, message, view_id, context=None):
view = self.browse(cr, uid, view_id, context)
not_avail = _('n/a')
message = ("%(msg)s\n\n" +
_("Error context:\nView `%(view_name)s`") +
"\n[view_id: %(viewid)s, xml_id: %(xmlid)s, "
"model: %(model)s, parent_id: %(parent)s]") % \
{
'view_name': view.name or not_avail,
'viewid': view_id or not_avail,
'xmlid': view.xml_id or not_avail,
'model': view.model or not_avail,
'parent': view.inherit_id.id or not_avail,
'msg': message,
}
_logger.error(message)
raise AttributeError(message)
def locate_node(self, arch, spec):
""" Locate a node in a source (parent) architecture.
Given a complete source (parent) architecture (i.e. the field
`arch` in a view), and a 'spec' node (a node in an inheriting
view that specifies the location in the source view of what
should be changed), return (if it exists) the node in the
source view matching the specification.
:param arch: a parent architecture to modify
:param spec: a modifying node in an inheriting view
:return: a node in the source matching the spec
"""
if spec.tag == 'xpath':
nodes = arch.xpath(spec.get('expr'))
return nodes[0] if nodes else None
elif spec.tag == 'field':
# Only compare the field name: a field can be only once in a given view
# at a given level (and for multilevel expressions, we should use xpath
# inheritance spec anyway).
for node in arch.iter('field'):
if node.get('name') == spec.get('name'):
return node
return None
for node in arch.iter(spec.tag):
if isinstance(node, SKIPPED_ELEMENT_TYPES):
continue
if all(node.get(attr) == spec.get(attr) for attr in spec.attrib
if attr not in ('position','version')):
# Version spec should match parent's root element's version
if spec.get('version') and spec.get('version') != arch.get('version'):
return None
return node
return None
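# Standalone sketch (not part of this class): the matching rules implemented
# by locate_node() above, replayed with plain lxml on a made-up parent arch;
# element names here are illustrative only.
from lxml import etree as _demo_etree
_parent = _demo_etree.fromstring(
    '<form string="Partner"><group><field name="name"/>'
    '<field name="email"/></group></form>')
# an <xpath expr="..."> spec is resolved directly with arch.xpath(expr)
print(_parent.xpath("//field[@name='email']")[0].get('name'))  # email
# a <field> spec is matched on the name attribute alone, since a field may
# appear at most once at a given level of a view
_match = next((n for n in _parent.iter('field')
               if n.get('name') == 'name'), None)
print(_match is not None)  # True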
def inherit_branding(self, specs_tree, view_id, root_id):
for node in specs_tree.iterchildren(tag=etree.Element):
xpath = node.getroottree().getpath(node)
if node.tag == 'data' or node.tag == 'xpath' or node.get('position') or node.get('t-field'):
self.inherit_branding(node, view_id, root_id)
else:
node.set('data-oe-id', str(view_id))
node.set('data-oe-source-id', str(root_id))
node.set('data-oe-xpath', xpath)
node.set('data-oe-model', 'ir.ui.view')
node.set('data-oe-field', 'arch')
return specs_tree
def apply_inheritance_specs(self, cr, uid, source, specs_tree, inherit_id, context=None):
""" Apply an inheriting view (a descendant of the base view)
Apply to a source architecture all the spec nodes (i.e. nodes
describing where and what changes to apply to some parent
architecture) given by an inheriting view.
:param Element source: a parent architecture to modify
:param Element specs_tree: a modifying architecture in an inheriting view
:param inherit_id: the database id of the inheriting view providing specs_tree
:return: a modified source where the specs are applied
:rtype: Element
"""
# Queue of specification nodes (i.e. nodes describing where and
# what changes to apply to some parent architecture).
specs = [specs_tree]
while len(specs):
spec = specs.pop(0)
if isinstance(spec, SKIPPED_ELEMENT_TYPES):
continue
if spec.tag == 'data':
specs += [c for c in spec]
continue
node = self.locate_node(source, spec)
if node is not None:
pos = spec.get('position', 'inside')
if pos == 'replace':
if node.getparent() is None:
source = copy.deepcopy(spec[0])
else:
for child in spec:
node.addprevious(child)
node.getparent().remove(node)
elif pos == 'attributes':
for child in spec.getiterator('attribute'):
attribute = (child.get('name'), child.text or None)
if attribute[1]:
node.set(attribute[0], attribute[1])
elif attribute[0] in node.attrib:
del node.attrib[attribute[0]]
else:
sib = node.getnext()
for child in spec:
if pos == 'inside':
node.append(child)
elif pos == 'after':
if sib is None:
node.addnext(child)
node = child
else:
sib.addprevious(child)
elif pos == 'before':
node.addprevious(child)
else:
self.raise_view_error(cr, uid, _("Invalid position attribute: '%s'") % pos, inherit_id, context=context)
else:
attrs = ''.join([
' %s="%s"' % (attr, spec.get(attr))
for attr in spec.attrib
if attr != 'position'
])
tag = "<%s%s>" % (spec.tag, attrs)
self.raise_view_error(cr, uid, _("Element '%s' cannot be located in parent view") % tag, inherit_id, context=context)
return source
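# Standalone sketch (not part of this class): the position="after" branch
# above, applied by hand with lxml; the toy spec mirrors what an inheriting
# view's modifying node would carry.
from lxml import etree as _demo_etree
_source = _demo_etree.fromstring(
    '<form><field name="name"/><field name="email"/></form>')
_spec = _demo_etree.fromstring(
    '<field name="name" position="after"><field name="phone"/></field>')
_node = next(n for n in _source.iter('field')
             if n.get('name') == _spec.get('name'))
_sib = _node.getnext()
for _child in list(_spec):  # list(): moving children while iterating is unsafe
    if _sib is None:
        _node.addnext(_child)
        _node = _child
    else:
        _sib.addprevious(_child)
print(_demo_etree.tostring(_source))
# <form><field name="name"/><field name="phone"/><field name="email"/></form>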
def apply_view_inheritance(self, cr, uid, source, source_id, model, root_id=None, context=None):
""" Apply all the (directly and indirectly) inheriting views.
:param source: a parent architecture to modify (with parent modifications already applied)
:param source_id: the database view_id of the parent view
:param model: the original model for which we create a view (not
necessarily the same as the source's model); only the inheriting
views with that specific model will be applied.
:return: a modified source where all the modifying architecture are applied
"""
if context is None: context = {}
if root_id is None:
root_id = source_id
sql_inherit = self.get_inheriting_views_arch(cr, uid, source_id, model, context=context)
for (specs, view_id) in sql_inherit:
specs_tree = etree.fromstring(specs.encode('utf-8'))
if context.get('inherit_branding'):
self.inherit_branding(specs_tree, view_id, root_id)
source = self.apply_inheritance_specs(cr, uid, source, specs_tree, view_id, context=context)
source = self.apply_view_inheritance(cr, uid, source, view_id, model, root_id=root_id, context=context)
return source
def read_combined(self, cr, uid, view_id, fields=None, context=None):
"""
Utility function to get a view combined with its inherited views.
* Gets the top of the view tree if a sub-view is requested
* Applies all inherited archs on the root view
* Returns the view with all requested fields
.. note:: ``arch`` is always added to the fields list even if not
requested (similar to ``id``)
"""
if context is None: context = {}
context = context.copy()
# if view_id is not a root view, climb back to the top.
base = v = self.browse(cr, uid, view_id, context=context)
check_view_ids = context.setdefault('check_view_ids', [])
while v.mode != 'primary':
# Add inherited views to the list of views whose loading is forced;
# otherwise, inherited views could not find elements created in their direct parent when that parent is defined in the same module
check_view_ids.append(v.id)
v = v.inherit_id
root_id = v.id
# arch and model fields are always returned
if fields:
fields = list({'arch', 'model'}.union(fields))
# read the view arch
[view] = self.read(cr, uid, [root_id], fields=fields, context=context)
view_arch = etree.fromstring(view['arch'].encode('utf-8'))
if not v.inherit_id:
arch_tree = view_arch
else:
parent_view = self.read_combined(
cr, uid, v.inherit_id.id, fields=fields, context=context)
arch_tree = etree.fromstring(parent_view['arch'])
arch_tree = self.apply_inheritance_specs(
cr, uid, arch_tree, view_arch, parent_view['id'], context=context)
if context.get('inherit_branding'):
arch_tree.attrib.update({
'data-oe-model': 'ir.ui.view',
'data-oe-id': str(root_id),
'data-oe-field': 'arch',
})
# and apply inheritance
arch = self.apply_view_inheritance(
cr, uid, arch_tree, root_id, base.model, context=context)
return dict(view, arch=etree.tostring(arch, encoding='utf-8'))
#------------------------------------------------------
# Postprocessing: translation, groups and modifiers
#------------------------------------------------------
# TODO:
# - split postprocess so that it can be used instead of translate_qweb
# - remove group processing from ir_qweb
#------------------------------------------------------
def postprocess(self, cr, user, model, node, view_id, in_tree_view, model_fields, context=None):
"""Return the description of the fields in the node.
In a normal call to this method, node is a complete view architecture
but it is actually possible to give some sub-node (this is used so
that the method can call itself recursively).
Originally, the field descriptions are drawn from the node itself.
But there is now some code calling fields_get() in order to merge some
of that information into the architecture.
"""
if context is None:
context = {}
result = False
fields = {}
children = True
modifiers = {}
Model = self.pool.get(model)
if Model is None:
self.raise_view_error(cr, user, _('Model not found: %(model)s') % dict(model=model),
view_id, context)
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
def check_group(node):
"""Apply group restrictions, may be set at view level or model level::
* at view level this means the element should be made invisible to
people who are not members
* at model level (exclusively for fields, obviously), this means
the field should be completely removed from the view, as it is
completely unavailable for non-members
:return: True if field should be included in the result of fields_view_get
"""
if node.tag == 'field' and node.get('name') in Model._fields:
field = Model._fields[node.get('name')]
if field.groups and not self.user_has_groups(
cr, user, groups=field.groups, context=context):
node.getparent().remove(node)
fields.pop(node.get('name'), None)
# no point processing view-level ``groups`` anymore, return
return False
if node.get('groups'):
can_see = self.user_has_groups(
cr, user, groups=node.get('groups'), context=context)
if not can_see:
node.set('invisible', '1')
modifiers['invisible'] = True
if 'attrs' in node.attrib:
del(node.attrib['attrs']) #avoid making field visible later
del(node.attrib['groups'])
return True
if node.tag in ('field', 'node', 'arrow'):
if node.get('object'):
attrs = {}
views = {}
xml = "<form>"
for f in node:
if f.tag == 'field':
xml += etree.tostring(f, encoding="utf-8")
xml += "</form>"
new_xml = etree.fromstring(encode(xml))
ctx = context.copy()
ctx['base_model_name'] = model
xarch, xfields = self.postprocess_and_fields(cr, user, node.get('object'), new_xml, view_id, ctx)
views['form'] = {
'arch': xarch,
'fields': xfields
}
attrs = {'views': views}
fields = xfields
if node.get('name'):
attrs = {}
field = Model._fields.get(node.get('name'))
if field:
children = False
views = {}
for f in node:
if f.tag in ('form', 'tree', 'graph', 'kanban', 'calendar'):
node.remove(f)
ctx = context.copy()
ctx['base_model_name'] = model
xarch, xfields = self.postprocess_and_fields(cr, user, field.comodel_name, f, view_id, ctx)
views[str(f.tag)] = {
'arch': xarch,
'fields': xfields
}
attrs = {'views': views}
fields[node.get('name')] = attrs
field = model_fields.get(node.get('name'))
if field:
orm.transfer_field_to_modifiers(field, modifiers)
elif node.tag in ('form', 'tree'):
result = Model.view_header_get(cr, user, False, node.tag, context=context)
if result:
node.set('string', result)
in_tree_view = node.tag == 'tree'
elif node.tag == 'calendar':
for additional_field in ('date_start', 'date_delay', 'date_stop', 'color', 'all_day', 'attendee'):
if node.get(additional_field):
fields[node.get(additional_field)] = {}
if not check_group(node):
# node must be removed, no need to proceed further with its children
return fields
# The view architecture overrides the python model.
# Note: check_group() above may already have dropped 'attrs' from nodes
# hidden by groups, so those attrs are deliberately not transferred here
orm.transfer_node_to_modifiers(node, modifiers, context, in_tree_view)
# TODO remove attrs counterpart in modifiers when invisible is true ?
# translate view
if 'lang' in context:
Translations = self.pool['ir.translation']
if node.text and node.text.strip():
term = node.text.strip()
trans = Translations._get_source(cr, user, model, 'view', context['lang'], term)
if trans:
node.text = node.text.replace(term, trans)
if node.tail and node.tail.strip():
term = node.tail.strip()
trans = Translations._get_source(cr, user, model, 'view', context['lang'], term)
if trans:
node.tail = node.tail.replace(term, trans)
if node.get('string') and node.get('string').strip() and not result:
term = node.get('string').strip()
trans = Translations._get_source(cr, user, model, 'view', context['lang'], term)
if trans == term:
if 'base_model_name' in context:
# If translation is same as source, perhaps we'd have more luck with the alternative model name
# (in case we are in a mixed situation, such as an inherited view where parent_view.model != model)
trans = Translations._get_source(cr, user, context['base_model_name'], 'view', context['lang'], term)
else:
inherit_model = self.browse(cr, user, view_id, context=context).inherit_id.model or model
if inherit_model != model:
# parent view has a different model, if the term belongs to the parent view, the translation
# should be checked on the parent model as well
trans = Translations._get_source(cr, user, inherit_model, 'view', context['lang'], term)
if trans:
node.set('string', trans)
for attr_name in ('confirm', 'sum', 'avg', 'help', 'placeholder'):
attr_value = node.get(attr_name)
if attr_value and attr_value.strip():
trans = Translations._get_source(cr, user, model, 'view', context['lang'], attr_value.strip())
if trans:
node.set(attr_name, trans)
for f in node:
if children or (node.tag == 'field' and f.tag in ('filter','separator')):
fields.update(self.postprocess(cr, user, model, f, view_id, in_tree_view, model_fields, context))
orm.transfer_modifiers_to_node(modifiers, node)
return fields
def add_on_change(self, cr, user, model_name, arch):
""" Add attribute on_change="1" on fields that are dependencies of
computed fields on the same view.
"""
# map each field object to its corresponding nodes in arch
field_nodes = collections.defaultdict(list)
def collect(node, model):
if node.tag == 'field':
field = model._fields.get(node.get('name'))
if field:
field_nodes[field].append(node)
if field.relational:
model = self.pool.get(field.comodel_name)
for child in node:
collect(child, model)
collect(arch, self.pool[model_name])
for field, nodes in field_nodes.iteritems():
# if field should trigger an onchange, add on_change="1" on the
# nodes referring to field
model = self.pool[field.model_name]
if model._has_onchange(field, field_nodes):
for node in nodes:
if not node.get('on_change'):
node.set('on_change', '1')
return arch
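# Standalone sketch (not part of this class): the traversal pattern used by
# add_on_change() above, with plain lxml and a made-up trigger set standing
# in for the model's onchange dependencies.
import collections as _demo_collections
from lxml import etree as _demo_etree
_arch = _demo_etree.fromstring(
    '<form><field name="qty"/><field name="price"/><field name="total"/></form>')
_field_nodes = _demo_collections.defaultdict(list)
for _n in _arch.iter('field'):
    _field_nodes[_n.get('name')].append(_n)
_triggers = {'qty', 'price'}  # hypothetical onchange dependencies
for _name, _ns in _field_nodes.items():
    if _name in _triggers:
        for _n in _ns:
            if not _n.get('on_change'):
                _n.set('on_change', '1')
print(_demo_etree.tostring(_arch))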
def _disable_workflow_buttons(self, cr, user, model, node):
""" Set the buttons in node to readonly if the user can't activate them. """
if model is None or user == 1:
# admin user can always activate workflow buttons
return node
# TODO handle the case of more than one workflow for a model or multiple
# transitions with different groups and same signal
usersobj = self.pool.get('res.users')
buttons = (n for n in node.getiterator('button') if n.get('type') != 'object')
for button in buttons:
user_groups = usersobj.read(cr, user, [user], ['groups_id'])[0]['groups_id']
cr.execute("""SELECT DISTINCT t.group_id
FROM wkf
INNER JOIN wkf_activity a ON a.wkf_id = wkf.id
INNER JOIN wkf_transition t ON (t.act_to = a.id)
WHERE wkf.osv = %s
AND t.signal = %s
AND t.group_id is NOT NULL
""", (model, button.get('name')))
group_ids = [x[0] for x in cr.fetchall() if x[0]]
can_click = not group_ids or bool(set(user_groups).intersection(group_ids))
button.set('readonly', str(int(not can_click)))
return node
def postprocess_and_fields(self, cr, user, model, node, view_id, context=None):
""" Return an architecture and a description of all the fields.
The field description combines the result of fields_get() and
postprocess().
:param node: the architecture as an etree
:return: a tuple (arch, fields) where arch is the given node as a
string and fields is the description of all the fields.
"""
fields = {}
Model = self.pool.get(model)
if Model is None:
self.raise_view_error(cr, user, _('Model not found: %(model)s') % dict(model=model), view_id, context)
if node.tag == 'diagram':
if node.getchildren()[0].tag == 'node':
node_model = self.pool[node.getchildren()[0].get('object')]
node_fields = node_model.fields_get(cr, user, None, context=context)
fields.update(node_fields)
if not node.get("create") and not node_model.check_access_rights(cr, user, 'create', raise_exception=False):
node.set("create", 'false')
if node.getchildren()[1].tag == 'arrow':
arrow_fields = self.pool[node.getchildren()[1].get('object')].fields_get(cr, user, None, context=context)
fields.update(arrow_fields)
else:
fields = Model.fields_get(cr, user, None, context=context)
node = self.add_on_change(cr, user, model, node)
fields_def = self.postprocess(cr, user, model, node, view_id, False, fields, context=context)
node = self._disable_workflow_buttons(cr, user, model, node)
if node.tag in ('kanban', 'tree', 'form', 'gantt'):
for action, operation in (('create', 'create'), ('delete', 'unlink'), ('edit', 'write')):
if not node.get(action) and not Model.check_access_rights(cr, user, operation, raise_exception=False):
node.set(action, 'false')
if node.tag == 'kanban':
group_by_name = node.get('default_group_by')
if group_by_name in Model._fields:
group_by_field = Model._fields[group_by_name]
if group_by_field.type == 'many2one':
group_by_model = Model.pool[group_by_field.comodel_name]
for action, operation in (('group_create', 'create'), ('group_delete', 'unlink'), ('group_edit', 'write')):
if not node.get(action) and not group_by_model.check_access_rights(cr, user, operation, raise_exception=False):
node.set(action, 'false')
arch = etree.tostring(node, encoding="utf-8").replace('\t', '')
for k in fields.keys():
if k not in fields_def:
del fields[k]
for field in fields_def:
if field in fields:
fields[field].update(fields_def[field])
else:
message = _("Field `%(field_name)s` does not exist") % \
dict(field_name=field)
self.raise_view_error(cr, user, message, view_id, context)
return arch, fields
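# Standalone sketch (not part of this class): the dict merge that ends
# postprocess_and_fields() above; fields_get() output is pruned to what the
# arch uses, then enriched with per-node descriptions. Values are invented.
_fields = {'name': {'type': 'char'}, 'email': {'type': 'char'},
           'active': {'type': 'boolean'}}
_fields_def = {'name': {'views': {}}, 'email': {'views': {}}}
for _k in list(_fields):
    if _k not in _fields_def:
        del _fields[_k]  # 'active' is not referenced by the view, drop it
for _f in _fields_def:
    _fields[_f].update(_fields_def[_f])
print(sorted(_fields))  # ['email', 'name']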
#------------------------------------------------------
# QWeb template views
#------------------------------------------------------
@tools.ormcache_context(accepted_keys=('lang','inherit_branding', 'editable', 'translatable'))
def read_template(self, cr, uid, xml_id, context=None):
if isinstance(xml_id, (int, long)):
view_id = xml_id
else:
if '.' not in xml_id:
raise ValueError('Invalid template id: %r' % (xml_id,))
view_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, xml_id, raise_if_not_found=True)
arch = self.read_combined(cr, uid, view_id, fields=['arch'], context=context)['arch']
arch_tree = etree.fromstring(arch)
if 'lang' in context:
arch_tree = self.translate_qweb(cr, uid, view_id, arch_tree, context['lang'], context)
self.distribute_branding(arch_tree)
root = etree.Element('templates')
root.append(arch_tree)
arch = etree.tostring(root, encoding='utf-8', xml_declaration=True)
return arch
def clear_cache(self):
self.read_template.clear_cache(self)
def _contains_branded(self, node):
return node.tag == 't'\
or 't-raw' in node.attrib\
or any(self.is_node_branded(child) for child in node.iterdescendants())
def _pop_view_branding(self, element):
distributed_branding = dict(
(attribute, element.attrib.pop(attribute))
for attribute in MOVABLE_BRANDING
if element.get(attribute))
return distributed_branding
def distribute_branding(self, e, branding=None, parent_xpath='',
index_map=misc.ConstantMapping(1)):
if e.get('t-ignore') or e.tag == 'head':
# remove any view branding possibly injected by inheritance
attrs = set(MOVABLE_BRANDING)
for descendant in e.iterdescendants(tag=etree.Element):
if not attrs.intersection(descendant.attrib): continue
self._pop_view_branding(descendant)
# TODO: find a better name and check if we have a string to boolean helper
return
node_path = e.get('data-oe-xpath')
if node_path is None:
node_path = "%s/%s[%d]" % (parent_xpath, e.tag, index_map[e.tag])
if branding and not (e.get('data-oe-model') or e.get('t-field')):
e.attrib.update(branding)
e.set('data-oe-xpath', node_path)
if not e.get('data-oe-model'): return
if {'t-esc', 't-raw'}.intersection(e.attrib):
# nodes which fully generate their content and have no reason to
# be branded because they cannot sensibly be edited
self._pop_view_branding(e)
elif self._contains_branded(e):
# if a branded element contains branded elements distribute own
# branding to children unless it's t-raw, then just remove branding
# on current element
distributed_branding = self._pop_view_branding(e)
if 't-raw' not in e.attrib:
# TODO: collections.Counter if remove p2.6 compat
# running index by tag type, for XPath query generation
indexes = collections.defaultdict(lambda: 0)
for child in e.iterchildren(tag=etree.Element):
if child.get('data-oe-xpath'):
# injected by view inheritance, skip otherwise
# generated xpath is incorrect
self.distribute_branding(child)
else:
indexes[child.tag] += 1
self.distribute_branding(
child, distributed_branding,
parent_xpath=node_path, index_map=indexes)
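# Standalone sketch (not part of this class): the per-tag index bookkeeping
# above, producing "parent/tag[n]" paths in the same style as the node_path
# computed for children that lack an injected data-oe-xpath.
import collections as _demo_collections
from lxml import etree as _demo_etree
_root = _demo_etree.fromstring('<div><span/><p/><span/></div>')
_indexes = _demo_collections.defaultdict(lambda: 0)
for _child in _root.iterchildren(tag=_demo_etree.Element):
    _indexes[_child.tag] += 1
    print("/div/%s[%d]" % (_child.tag, _indexes[_child.tag]))
# /div/span[1]  /div/p[1]  /div/span[2]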
def is_node_branded(self, node):
""" Finds out whether a node is branded or qweb-active (bears a
@data-oe-model or a @t-* *which is not t-field* as t-field does not
section out views)
:param node: an etree-compatible element to test
:type node: etree._Element
:rtype: boolean
"""
return any(
(attr in ('data-oe-model', 'group') or (attr != 't-field' and attr.startswith('t-')))
for attr in node.attrib
)
def _translate_qweb(self, cr, uid, arch, translate_func, context=None):
# TODO: this should be moved in a place before inheritance is applied
# but process() is only called on fields_view_get()
h = HTMLParser.HTMLParser()
def get_trans(text):
if not text or not text.strip():
return None
text = text.strip()
if len(text) < 2 or (text.startswith('<!') and text.endswith('>')):
return None
return translate_func(text)
if type(arch) not in SKIPPED_ELEMENT_TYPES and arch.tag not in SKIPPED_ELEMENTS:
text = get_trans(arch.text)
if text:
arch.text = arch.text.replace(arch.text.strip(), text)
tail = get_trans(arch.tail)
if tail:
arch.tail = arch.tail.replace(arch.tail.strip(), tail)
for attr_name in ('title', 'alt', 'label', 'placeholder'):
attr = get_trans(arch.get(attr_name))
if attr:
arch.set(attr_name, attr)
for node in arch.iterchildren("*"):
self._translate_qweb(cr, uid, node, translate_func, context)
def translate_qweb(self, cr, uid, id_, arch, lang, context=None):
view_ids = []
view = self.browse(cr, uid, id_, context=context)
if view:
view_ids.append(view.id)
if view.mode == 'primary' and view.inherit_id.mode == 'primary':
# template is `cloned` from parent view
view_ids.append(view.inherit_id.id)
Translations = self.pool['ir.translation']
def translate_func(term):
trans = Translations._get_source(cr, uid, 'website', 'view', lang, term, view_ids)
return trans
self._translate_qweb(cr, uid, arch, translate_func, context=context)
return arch
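# Standalone sketch (not part of this class): the recursive text/tail/
# attribute walk done by _translate_qweb() above, with a plain dict standing
# in for ir.translation lookups; the terms are invented.
from lxml import etree as _demo_etree
_table = {'Hello': 'Bonjour', 'World': 'Monde'}
def _demo_translate(node, table):
    def lookup(text):
        text = (text or '').strip()
        return table.get(text)
    if lookup(node.text):
        node.text = node.text.replace(node.text.strip(), lookup(node.text))
    if lookup(node.tail):
        node.tail = node.tail.replace(node.tail.strip(), lookup(node.tail))
    for attr in ('title', 'alt', 'label', 'placeholder'):
        if lookup(node.get(attr)):
            node.set(attr, lookup(node.get(attr)))
    for child in node.iterchildren('*'):
        _demo_translate(child, table)
_arch = _demo_etree.fromstring('<t><span title="World">Hello</span></t>')
_demo_translate(_arch, _table)
print(_demo_etree.tostring(_arch))  # <t><span title="Monde">Bonjour</span></t>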
@openerp.tools.ormcache()
def get_view_xmlid(self, cr, uid, id):
imd = self.pool['ir.model.data']
domain = [('model', '=', 'ir.ui.view'), ('res_id', '=', id)]
xmlid = imd.search_read(cr, uid, domain, ['module', 'name'])[0]
return '%s.%s' % (xmlid['module'], xmlid['name'])
@api.cr_uid_ids_context
def render(self, cr, uid, id_or_xml_id, values=None, engine='ir.qweb', context=None):
if isinstance(id_or_xml_id, list):
id_or_xml_id = id_or_xml_id[0]
if not context:
context = {}
if values is None:
values = dict()
qcontext = dict(
env=api.Environment(cr, uid, context),
keep_query=keep_query,
request=request, # might be unbound if we're not in an httprequest context
debug=request.debug if request else False,
json=simplejson,
quote_plus=werkzeug.url_quote_plus,
time=time,
datetime=datetime,
relativedelta=relativedelta,
)
qcontext.update(values)
# TODO: This helper can be used by any template that wants to embed the backend.
# It is currently necessary because the ir.ui.view bundle inheritance does not
# match the module dependency graph.
def get_modules_order():
if request:
from openerp.addons.web.controllers.main import module_boot
return simplejson.dumps(module_boot())
return '[]'
qcontext['get_modules_order'] = get_modules_order
def loader(name):
return self.read_template(cr, uid, name, context=context)
return self.pool[engine].render(cr, uid, id_or_xml_id, qcontext, loader=loader, context=context)
#------------------------------------------------------
# Misc
#------------------------------------------------------
def graph_get(self, cr, uid, id, model, node_obj, conn_obj, src_node, des_node, label, scale, context=None):
nodes=[]
nodes_name=[]
transitions=[]
start=[]
tres={}
labels={}
no_ancester=[]
blank_nodes = []
_Model_Obj = self.pool[model]
_Node_Obj = self.pool[node_obj]
_Arrow_Obj = self.pool[conn_obj]
for model_key,model_value in _Model_Obj._columns.items():
if model_value._type=='one2many':
if model_value._obj==node_obj:
_Node_Field=model_key
_Model_Field=model_value._fields_id
for node_key,node_value in _Node_Obj._columns.items():
if node_value._type=='one2many':
if node_value._obj==conn_obj:
# _Source_Field = "Incoming Arrows" (connected via des_node)
if node_value._fields_id == des_node:
_Source_Field=node_key
# _Destination_Field = "Outgoing Arrows" (connected via src_node)
if node_value._fields_id == src_node:
_Destination_Field=node_key
datas = _Model_Obj.read(cr, uid, id, [],context)
for a in _Node_Obj.read(cr,uid,datas[_Node_Field],[]):
if a[_Source_Field] or a[_Destination_Field]:
nodes_name.append((a['id'],a['name']))
nodes.append(a['id'])
else:
blank_nodes.append({'id': a['id'],'name':a['name']})
if a.has_key('flow_start') and a['flow_start']:
start.append(a['id'])
else:
if not a[_Source_Field]:
no_ancester.append(a['id'])
for t in _Arrow_Obj.read(cr,uid, a[_Destination_Field],[]):
transitions.append((a['id'], t[des_node][0]))
tres[str(t['id'])] = (a['id'],t[des_node][0])
label_string = ""
if label:
for lbl in eval(label):
if t.has_key(tools.ustr(lbl)) and tools.ustr(t[lbl])=='False':
label_string += ' '
else:
label_string = label_string + " " + tools.ustr(t[lbl])
labels[str(t['id'])] = (a['id'],label_string)
g = graph(nodes, transitions, no_ancester)
g.process(start)
g.scale(*scale)
result = g.result_get()
results = {}
for node in nodes_name:
results[str(node[0])] = result[node[0]]
results[str(node[0])]['name'] = node[1]
return {'nodes': results,
'transitions': tres,
'label' : labels,
'blank_nodes': blank_nodes,
'node_parent_field': _Model_Field,}
def _validate_custom_views(self, cr, uid, model):
"""Validate architecture of custom views (= without xml id) for a given model.
This method is called at the end of registry update.
"""
cr.execute("""SELECT max(v.id)
FROM ir_ui_view v
LEFT JOIN ir_model_data md ON (md.model = 'ir.ui.view' AND md.res_id = v.id)
WHERE md.module IS NULL
AND v.model = %s
AND v.active = true
GROUP BY coalesce(v.inherit_id, v.id)
""", (model,))
ids = map(itemgetter(0), cr.fetchall())
context = dict(load_all_views=True)
return self._check_xml(cr, uid, ids, context=context)
def _validate_module_views(self, cr, uid, module):
"""Validate architecture of all the views of a given module"""
assert not self.pool._init or module in self.pool._init_modules
xmlid_filter = ''
params = (module,)
if self.pool._init:
# only validate the views that are still existing...
xmlid_filter = "AND md.name IN %s"
names = tuple(name for (xmod, name), (model, res_id) in self.pool.model_data_reference_ids.items() if xmod == module and model == self._name)
if not names:
# no views for this module, nothing to validate
return
params += (names,)
cr.execute("""SELECT max(v.id)
FROM ir_ui_view v
LEFT JOIN ir_model_data md ON (md.model = 'ir.ui.view' AND md.res_id = v.id)
WHERE md.module = %s
{0}
GROUP BY coalesce(v.inherit_id, v.id)
""".format(xmlid_filter), params)
for vid, in cr.fetchall():
if not self._check_xml(cr, uid, [vid]):
self.raise_view_error(cr, uid, "Can't validate view", vid)
# vim:et:
| agpl-3.0 |
gabrielfalcao/lettuce | tests/integration/django/dill/leaves/models.py | 18 | 1285 | from django.db import models
class Garden(models.Model):
name = models.CharField(max_length=100)
area = models.IntegerField()
raining = models.BooleanField()
@property
def howbig(self):
if self.area < 50:
return 'small'
elif self.area < 150:
return 'medium'
else:
return 'big'
class Field(models.Model):
name = models.CharField(max_length=100)
class Fruit(models.Model):
name = models.CharField(max_length=100)
garden = models.ForeignKey(Garden)
ripe_by = models.DateField()
fields = models.ManyToManyField(Field)
class Bee(models.Model):
name = models.CharField(max_length=100)
pollinated_fruit = models.ManyToManyField(Fruit,
related_name='pollinated_by')
class Goose(models.Model):
name = models.CharField(max_length=100)
class Meta:
verbose_name_plural = "geese"
class Harvester(models.Model):
make = models.CharField(max_length=100)
rego = models.CharField(max_length=100)
class Panda(models.Model):
"""
Not part of a garden, but still an important part of any good application
"""
name = models.CharField(max_length=100)
location = models.CharField(max_length=100)
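# A hypothetical usage sketch for the models above; it assumes a configured
# Django project with this app installed, its migrations applied, and
# `import datetime` in scope, so it illustrates the relations rather than
# running standalone:
#
#   garden = Garden.objects.create(name='Back yard', area=120, raining=False)
#   field = Field.objects.create(name='North field')
#   fruit = Fruit.objects.create(name='Apple', garden=garden,
#                                ripe_by=datetime.date(2015, 3, 1))
#   fruit.fields.add(field)            # ManyToManyField
#   bee = Bee.objects.create(name='Buzz')
#   bee.pollinated_fruit.add(fruit)    # reverse accessor: fruit.pollinated_by
#   assert garden.howbig == 'medium'   # 50 <= 120 < 150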
| gpl-3.0 |
skk/eche | eche/step2_eval.py | 1 | 1184 | import traceback
import sys
from eche.eche_readline import getline
from eche.reader import read_str, Blank
from eche.printer import print_str
from eche.eval import eval_ast
from eche.env import get_default_env
# noinspection PyPep8Naming
def READ(data):
return read_str(data)
# noinspection PyPep8Naming
def EVAL(ast, env):
return eval_ast(ast, env)
# noinspection PyPep8Naming
def PRINT(exp):
return print_str(exp)
# noinspection PyPep8Naming
def REP(data):
return PRINT(EVAL(READ(data), get_default_env()))
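# A hypothetical trace of the read -> eval -> print pipeline above; it
# assumes the default env binds mal-style arithmetic symbols, which this
# sketch does not verify against eche's actual env:
#
#   >>> REP('(+ 1 2)')
#   '3'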
def repl(): # pragma: no cover
while True:
try:
line = getline(prompt_msg='user> ')
if line is None:
break
if line == '':
continue
print(REP(line))
except Blank:
continue
except SyntaxError as e:
print("".join(traceback.format_exception(*sys.exc_info())))
continue
except IOError as e:
print("".join(traceback.format_exception(*sys.exc_info())))
break
return 0
def main(): # pragma: no cover
repl()
if __name__ == "__main__": # pragma: no cover
main()
| mit |
opensemanticsearch/open-semantic-etl | src/opensemanticetl/enhance_xml.py | 1 | 2394 | import xml.etree.ElementTree as ElementTree
import os.path
import sys
class enhance_xml(object):
def elements2data(self, element, data, path="xml"):
path += "/" + element.tag
fieldname = path + '_ss'
text = (element.text or '').strip()
if text:
if fieldname in data:
data[fieldname].append(text)
else:
data[fieldname] = [text]
for child in element:
data = self.elements2data(element=child, path=path, data=data)
return data
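# Standalone sketch (not part of this class): the flattening rule above, in
# which each element path becomes a multi-valued '<path>_ss' field; pure
# stdlib, with a made-up sidecar document.
import xml.etree.ElementTree as _DemoET
def _demo_flatten(element, data, path='xml'):
    path += '/' + element.tag
    text = (element.text or '').strip()
    if text:
        data.setdefault(path + '_ss', []).append(text)
    for child in element:
        _demo_flatten(child, data, path)
    return data
_root = _DemoET.fromstring('<meta><title>Report</title><tag>a</tag><tag>b</tag></meta>')
_data = {}
for _child in _root:
    _demo_flatten(_child, _data, path=_root.tag)
print(_data)  # {'meta/title_ss': ['Report'], 'meta/tag_ss': ['a', 'b']}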
# get xml filename by mapping configuration
def get_xml_filename(self, filename, mapping):
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
xmlfilename = mapping
xmlfilename = xmlfilename.replace('%DIRNAME%', dirname)
xmlfilename = xmlfilename.replace('%BASENAME%', basename)
if not os.path.isfile(xmlfilename):
xmlfilename = False
return xmlfilename
def process(self, parameters=None, data=None):
if parameters is None:
parameters = {}
if data is None:
data = {}
verbose = False
if 'verbose' in parameters:
if parameters['verbose']:
verbose = True
filename = parameters['filename']
mapping = parameters['xml_sidecar_file_mapping']
#
# is there a xml sidecar file?
#
xmlfilename = self.get_xml_filename(filename, mapping)
if verbose:
if xmlfilename:
print('XML sidecar file: {}'.format(xmlfilename))
else:
print("No xml sidecar file")
#
# read meta data from the XML sidecar file
#
if xmlfilename:
if verbose:
print("Reading XML sidecar file: {}".format(xmlfilename))
try:
# Parse the XML file
parser = ElementTree.XMLParser()
et = ElementTree.parse(xmlfilename, parser)
root = et.getroot()
for child in root:
self.elements2data(element=child, path=root.tag, data=data)
except BaseException as e:
sys.stderr.write(
"Exception while parsing XML {} {}".format(xmlfilename, e))
return parameters, data
| gpl-3.0 |
CallaJun/hackprince | indico/matplotlib/axes/_axes.py | 10 | 260820 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import reduce, xrange, zip, zip_longest
import math
import warnings
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.cbook as cbook
from matplotlib.cbook import _string_to_bool, mplDeprecation
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as _ # <-registers a date unit converter
from matplotlib import docstring
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import matplotlib.mlab as mlab
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.stackplot as mstack
import matplotlib.streamplot as mstream
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.tri as mtri
import matplotlib.transforms as mtrans
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.axes._base import _AxesBase
from matplotlib.axes._base import _process_plot_format
iterable = cbook.iterable
is_string_like = cbook.is_string_like
is_sequence_of_strings = cbook.is_sequence_of_strings
# The axes module contains all the wrappers to plotting functions.
# All the other methods should go in the _AxesBase class.
class Axes(_AxesBase):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
### Labelling, legend and texts
def get_title(self, loc="center"):
"""Get an axes title.
Get one of the three available axes titles. The available titles
are positioned above the axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
loc : {'center', 'left', 'right'}, str, optional
Which title to get, defaults to 'center'
Returns
-------
title: str
The title text string.
"""
try:
title = {'left': self._left_title,
'center': self.title,
'right': self._right_title}[loc.lower()]
except KeyError:
raise ValueError("'%s' is not a valid location" % loc)
return title.get_text()
@docstring.dedent_interpd
def set_title(self, label, fontdict=None, loc="center", **kwargs):
"""
Set a title for the axes.
Set one of the three available axes titles. The available titles
are positioned above the axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
label : str
Text to use for the title
fontdict : dict
A dictionary controlling the appearance of the title text,
the default `fontdict` is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight' : rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
loc : {'center', 'left', 'right'}, str, optional
Which title to set, defaults to 'center'
Returns
-------
text : :class:`~matplotlib.text.Text`
The matplotlib text instance representing the title
Other parameters
----------------
kwargs : text properties
Other keyword arguments are text properties, see
:class:`~matplotlib.text.Text` for a list of valid text
properties.
"""
try:
title = {'left': self._left_title,
'center': self.title,
'right': self._right_title}[loc.lower()]
except KeyError:
raise ValueError("'%s' is not a valid location" % loc)
default = {
'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc.lower()}
title.set_text(label)
title.update(default)
if fontdict is not None:
title.update(fontdict)
title.update(kwargs)
return title
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the xaxis.
Parameters
----------
xlabel : string
x label
labelpad : scalar, optional, default: None
spacing in points between the label and the x-axis
Other parameters
----------------
kwargs : `~matplotlib.text.Text` properties
See also
--------
text : for information on how override and the optional args work
"""
if labelpad is not None:
self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the yaxis
Parameters
----------
ylabel : string
y label
labelpad : scalar, optional, default: None
spacing in points between the label and the x-axis
Other parameters
----------------
kwargs : `~matplotlib.text.Text` properties
See also
--------
text : for information on how override and the optional args work
"""
if labelpad is not None:
self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
def _get_legend_handles(self, legend_handler_map=None):
"""
Return a generator of artists that can be used as handles in
a legend.
"""
handles_original = (self.lines + self.patches +
self.collections + self.containers)
handler_map = mlegend.Legend.get_default_handler_map()
if legend_handler_map is not None:
handler_map = handler_map.copy()
handler_map.update(legend_handler_map)
has_handler = mlegend.Legend.get_legend_handler
for handle in handles_original:
label = handle.get_label()
if label != '_nolegend_' and has_handler(handler_map, handle):
yield handle
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
handles = []
labels = []
for handle in self._get_legend_handles(legend_handler_map):
label = handle.get_label()
if label and not label.startswith('_'):
handles.append(handle)
labels.append(label)
return handles, labels
def legend(self, *args, **kwargs):
"""
Places a legend on the axes.
To make a legend for lines which already exist on the axes
(via plot for instance), simply call this function with an iterable
of strings, one for each legend item. For example::
ax.plot([1, 2, 3])
ax.legend(['A simple line'])
However, in order to keep the "label" and the legend element
instance together, it is preferable to specify the label either at
artist creation, or by calling the
:meth:`~matplotlib.artist.Artist.set_label` method on the artist::
line, = ax.plot([1, 2, 3], label='Inline label')
# Overwrite the label by calling the method.
line.set_label('Label via method')
ax.legend()
Specific lines can be excluded from the automatic legend element
selection by defining a label starting with an underscore.
This is default for all artists, so calling :meth:`legend` without
any arguments and without setting the labels manually will result in
no legend being drawn.
For full control of which artists have a legend entry, it is possible
to pass an iterable of legend artists followed by an iterable of
legend labels respectively::
legend((line1, line2, line3), ('label1', 'label2', 'label3'))
Parameters
----------
loc : int or string or pair of floats, default: 0
The location of the legend. Possible codes are:
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
Alternatively can be a 2-tuple giving ``x, y`` of the lower-left
corner of the legend in axes coordinates (in which case
``bbox_to_anchor`` will be ignored).
bbox_to_anchor : :class:`matplotlib.transforms.BboxBase` instance \
or tuple of floats
Specify any arbitrary location for the legend in `bbox_transform`
coordinates (default Axes coordinates).
For example, to put the legend's upper right hand corner in the
center of the axes the following keywords can be used::
loc='upper right', bbox_to_anchor=(0.5, 0.5)
ncol : integer
The number of columns that the legend has. Default is 1.
prop : None or :class:`matplotlib.font_manager.FontProperties` or dict
The font properties of the legend. If None (default), the current
:data:`matplotlib.rcParams` will be used.
fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium',\
'large', 'x-large', 'xx-large'}
Controls the font size of the legend. If the value is numeric the
size will be the absolute font size in points. String values are
relative to the current default font size. This argument is only
used if `prop` is not specified.
numpoints : None or int
The number of marker points in the legend when creating a legend
entry for a line/:class:`matplotlib.lines.Line2D`.
Default is ``None`` which will take the value from the
``legend.numpoints`` :data:`rcParam<matplotlib.rcParams>`.
scatterpoints : None or int
The number of marker points in the legend when creating a legend
entry for a scatter plot/
:class:`matplotlib.collections.PathCollection`.
Default is ``None`` which will take the value from the
``legend.scatterpoints`` :data:`rcParam<matplotlib.rcParams>`.
scatteryoffsets : iterable of floats
The vertical offset (relative to the font size) for the markers
created for a scatter plot legend entry. 0.0 is at the base the
legend text, and 1.0 is at the top. To draw all markers at the
same height, set to ``[0.5]``. Default ``[0.375, 0.5, 0.3125]``.
markerscale : None or int or float
The relative size of legend markers compared with the originally
drawn ones. Default is ``None`` which will take the value from
the ``legend.markerscale`` :data:`rcParam <matplotlib.rcParams>`.
frameon : None or bool
Control whether a frame should be drawn around the legend.
Default is ``None`` which will take the value from the
``legend.frameon`` :data:`rcParam<matplotlib.rcParams>`.
fancybox : None or bool
Control whether round edges should be enabled around
the :class:`~matplotlib.patches.FancyBboxPatch` which
makes up the legend's background.
Default is ``None`` which will take the value from the
``legend.fancybox`` :data:`rcParam<matplotlib.rcParams>`.
shadow : None or bool
Control whether to draw a shadow behind the legend.
Default is ``None`` which will take the value from the
``legend.shadow`` :data:`rcParam<matplotlib.rcParams>`.
framealpha : None or float
Control the alpha transparency of the legend's frame.
Default is ``None`` which will take the value from the
``legend.framealpha`` :data:`rcParam<matplotlib.rcParams>`.
mode : {"expand", None}
If `mode` is set to ``"expand"`` the legend will be horizontally
expanded to fill the axes area (or `bbox_to_anchor` if defines
the legend's size).
bbox_transform : None or :class:`matplotlib.transforms.Transform`
The transform for the bounding box (`bbox_to_anchor`). For a value
of ``None`` (default) the Axes'
:data:`~matplotlib.axes.Axes.transAxes` transform will be used.
title : str or None
The legend's title. Default is no title (``None``).
borderpad : float or None
The fractional whitespace inside the legend border.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.borderpad`` :data:`rcParam<matplotlib.rcParams>`.
labelspacing : float or None
The vertical space between the legend entries.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.labelspacing`` :data:`rcParam<matplotlib.rcParams>`.
handlelength : float or None
The length of the legend handles.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.handlelength`` :data:`rcParam<matplotlib.rcParams>`.
handletextpad : float or None
The pad between the legend handle and text.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.handletextpad`` :data:`rcParam<matplotlib.rcParams>`.
borderaxespad : float or None
The pad between the axes and legend border.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.borderaxespad`` :data:`rcParam<matplotlib.rcParams>`.
columnspacing : float or None
The spacing between columns.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.columnspacing`` :data:`rcParam<matplotlib.rcParams>`.
handler_map : dict or None
The custom dictionary mapping instances or types to a legend
handler. This `handler_map` updates the default handler map
found at :func:`matplotlib.legend.Legend.get_legend_handler_map`.
Notes
-----
Not all kinds of artist are supported by the legend command.
See :ref:`plotting-guide-legend` for details.
Examples
--------
.. plot:: mpl_examples/api/legend_demo.py
"""
handlers = kwargs.get('handler_map', {}) or {}
# Support handles and labels being passed as keywords.
handles = kwargs.pop('handles', None)
labels = kwargs.pop('labels', None)
if handles is not None and labels is None:
labels = [handle.get_label() for handle in handles]
for label, handle in zip(labels[:], handles[:]):
if label.startswith('_'):
warnings.warn('The handle {!r} has a label of {!r} which '
'cannot be automatically added to the '
'legend.'.format(handle, label))
labels.remove(label)
handles.remove(handle)
elif labels is not None and handles is None:
# Get as many handles as there are labels.
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
# No arguments - automatically detect labels and handles.
elif len(args) == 0:
handles, labels = self.get_legend_handles_labels(handlers)
if not handles:
warnings.warn("No labelled objects found. "
"Use label='...' kwarg on individual plots.")
return None
# One argument. User defined labels - automatic handle detection.
elif len(args) == 1:
labels, = args
# Get as many handles as there are labels.
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
# Two arguments. Either:
# * user defined handles and labels
# * user defined labels and location (deprecated)
elif len(args) == 2:
if is_string_like(args[1]) or isinstance(args[1], int):
cbook.warn_deprecated('1.4', 'The "loc" positional argument '
'to legend is deprecated. Please use '
'the "loc" keyword instead.')
labels, loc = args
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
kwargs['loc'] = loc
else:
handles, labels = args
# Three arguments. User defined handles, labels and
# location (deprecated).
elif len(args) == 3:
cbook.warn_deprecated('1.4', 'The "loc" positional argument '
'to legend is deprecated. Please '
'use the "loc" keyword instead.')
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend.')
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
self.legend_._remove_method = lambda h: setattr(self, 'legend_', None)
return self.legend_
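# Standalone sketch (not part of this class): a minimal runnable example of
# the label-based legend flow described above, using the non-interactive Agg
# backend so it works headless.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([1, 2, 3], label='Inline label')
ax.plot([3, 2, 1], label='_hidden')   # leading underscore: skipped
ax.legend(loc='upper right')          # picks up only the labelled line
fig.savefig('legend_demo.png')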
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
Add text to the axes.
Add text in string `s` to axis at location `x`, `y`, data
coordinates.
Parameters
----------
x, y : scalars
data coordinates
s : string
text
fontdict : dictionary, optional, default: None
A dictionary to override the default text properties. If fontdict
is None, the defaults are determined by your rc parameters.
withdash : boolean, optional, default: False
Creates a `~matplotlib.text.TextWithDash` instance instead of a
`~matplotlib.text.Text` instance.
Other parameters
----------------
kwargs : `~matplotlib.text.Text` properties.
Other miscellaneous text parameters.
Examples
--------
Individual keyword arguments can be used to override any given
parameter::
>>> text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
>>> text(0.5, 0.5,'matplotlib', horizontalalignment='center',
... verticalalignment='center',
... transform=ax.transAxes)
You can put a rectangular box around the text instance (e.g., to
set a background color) by using the keyword `bbox`. `bbox` is
a dictionary of `~matplotlib.patches.Rectangle`
properties. For example::
>>> text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
"""
default = {
'verticalalignment': 'baseline',
'horizontalalignment': 'left',
'transform': self.transData,
'clip_on': False}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s)
else:
t = mtext.Text(
x=x, y=y, text=s)
self._set_artist_props(t)
t.update(default)
if fontdict is not None:
t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
t.set_clip_path(self.patch)
return t
@docstring.dedent_interpd
def annotate(self, *args, **kwargs):
"""
Create an annotation: a piece of text referring to a data
point.
Parameters
----------
s : string
label
xy : (x, y)
position of element to annotate
xytext : (x, y) , optional, default: None
position of the label `s`
xycoords : string, optional, default: "data"
string that indicates what type of coordinates `xy` is. Examples:
"figure points", "figure pixels", "figure fraction", "axes
points", .... See `matplotlib.text.Annotation` for more details.
textcoords : string, optional
string that indicates what type of coordinates `text` is. Examples:
"figure points", "figure pixels", "figure fraction", "axes
points", .... See `matplotlib.text.Annotation` for more details.
Default is None.
arrowprops : `matplotlib.lines.Line2D` properties, optional
Dictionary of line properties for the arrow that connects
the annotation to the point. If the dictionary has a key
`arrowstyle`, a `~matplotlib.patches.FancyArrowPatch`
instance is created and drawn. See
`matplotlib.text.Annotation` for more details on valid
options. Default is None.
Returns
-------
a : `~matplotlib.text.Annotation`
Notes
-----
%(Annotation)s
Examples
--------
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if 'clip_on' in kwargs:
a.set_clip_path(self.patch)
self.texts.append(a)
a._remove_method = lambda h: self.texts.remove(h)
return a
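# Standalone sketch (not part of this class): a small runnable example of
# text() and annotate(); coordinates and styling are arbitrary, and Agg
# keeps it headless.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
ax.text(0.5, 0.5, 'matplotlib', transform=ax.transAxes,
        horizontalalignment='center', verticalalignment='center',
        bbox=dict(facecolor='red', alpha=0.5))
ax.annotate('peak', xy=(2, 4), xytext=(1.0, 3.0),
            arrowprops=dict(arrowstyle='->'))
fig.savefig('text_annotate_demo.png')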
#### Lines and spans
@docstring.dedent_interpd
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal line across the axis.
Parameters
----------
y : scalar, optional, default: 0
y position in data coordinates of the horizontal line.
xmin : scalar, optional, default: 0
Should be between 0 and 1, 0 being the far left of the plot, 1 the
far right of the plot.
xmax : scalar, optional, default: 1
Should be between 0 and 1, 0 being the far left of the plot, 1 the
far right of the plot.
Returns
-------
`~matplotlib.lines.Line2D`
Notes
-----
kwargs are the same as kwargs to plot, and can be
used to control the line properties. e.g.,
Examples
--------
* draw a thick red hline at 'y' = 0 that spans the xrange::
>>> axhline(linewidth=4, color='r')
* draw a default hline at 'y' = 1 that spans the xrange::
>>> axhline(y=1)
* draw a default hline at 'y' = .5 that spans the middle half of
the xrange::
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
See also
--------
axhspan : for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axhline generates its own transform.")
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info(ydata=y, kwargs=kwargs)
yy = self.convert_yunits(y)
scaley = (yy < ymin) or (yy > ymax)
trans = self.get_yaxis_transform(which='grid')
l = mlines.Line2D([xmin, xmax], [y, y], transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
@docstring.dedent_interpd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
Add a vertical line across the axes.
Parameters
----------
x : scalar, optional, default: 0
x position in data coordinates of the vertical line.
ymin : scalar, optional, default: 0
Should be between 0 and 1, 0 being the far left of the plot, 1 the
far right of the plot.
ymax : scalar, optional, default: 1
Should be between 0 and 1, 0 being the far left of the plot, 1 the
far right of the plot.
Returns
-------
`~matplotlib.lines.Line2D`
Examples
---------
* draw a thick red vline at *x* = 0 that spans the yrange::
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange::
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange::
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
See also
--------
axhspan : for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axvline generates its own transform.")
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info(xdata=x, kwargs=kwargs)
xx = self.convert_xunits(x)
scalex = (xx < xmin) or (xx > xmax)
trans = self.get_xaxis_transform(which='grid')
l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
@docstring.dedent_interpd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal span (rectangle) across the axis.
Call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, e.g., with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes::
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = self.get_yaxis_transform(which='grid')
# process the unit information
self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
# first we need to strip away the units
xmin, xmax = self.convert_xunits([xmin, xmax])
ymin, ymax = self.convert_yunits([ymin, ymax])
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scalex=False)
return p
@docstring.dedent_interpd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
Add a vertical span (rectangle) across the axes.
Call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, e.g., with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes::
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
trans = self.get_xaxis_transform(which='grid')
# process the unit information
self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
# first we need to strip away the units
xmin, xmax = self.convert_xunits([xmin, xmax])
ymin, ymax = self.convert_yunits([ymin, ymax])
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scaley=False)
return p
@docstring.dedent
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot horizontal lines at each `y` from `xmin` to `xmax`.
Parameters
----------
y : scalar or sequence of scalar
y-indexes where to plot the lines.
xmin, xmax : scalar or 1D array_like
Respective beginning and end of each line. If scalars are
provided, all lines will have the same length.
colors : array_like of colors, optional, default: 'k'
linestyles : ['solid' | 'dashed' | 'dashdot' | 'dotted'], optional
label : string, optional, default: ''
Returns
-------
lines : `~matplotlib.collections.LineCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.LineCollection` properties.
See also
--------
vlines : vertical lines
Examples
--------
.. plot:: mpl_examples/pylab_examples/vline_hline_demo.py
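A minimal usage sketch (``ax`` is assumed to be an existing
`~matplotlib.axes.Axes` instance; the values are illustrative only)::
>>> ax.hlines([1, 2, 3], 0, [1, 2, 3], colors='r', linestyles='dashed')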
"""
# We do the conversion first since not all unitized data is uniform
# process the unit information
self._process_unit_info([xmin, xmax], y, kwargs=kwargs)
y = self.convert_yunits(y)
xmin = self.convert_xunits(xmin)
xmax = self.convert_xunits(xmax)
if not iterable(y):
y = [y]
if not iterable(xmin):
xmin = [xmin]
if not iterable(xmax):
xmax = [xmax]
y = np.ravel(y)
xmin = np.resize(xmin, y.shape)
xmax = np.resize(xmax, y.shape)
verts = [((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
if len(y) > 0:
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
@docstring.dedent_interpd
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot vertical lines.
Plot vertical lines at each `x` from `ymin` to `ymax`.
Parameters
----------
x : scalar or 1D array_like
x-indexes where to plot the lines.
ymin, ymax : scalar or 1D array_like
Respective beginning and end of each line. If scalars are
provided, all lines will have the same length.
colors : array_like of colors, optional, default: 'k'
linestyles : ['solid' | 'dashed' | 'dashdot' | 'dotted'], optional
label : string, optional, default: ''
Returns
-------
lines : `~matplotlib.collections.LineCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.LineCollection` properties.
See also
--------
hlines : horizontal lines
Examples
--------
.. plot:: mpl_examples/pylab_examples/vline_hline_demo.py
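A minimal usage sketch (``ax`` is assumed to be an existing
`~matplotlib.axes.Axes` instance; the values are illustrative only)::
>>> ax.vlines([0.2, 0.4, 0.6], 0, [1, 4, 9], colors='b')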
"""
self._process_unit_info(xdata=x, ydata=[ymin, ymax], kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits(x)
ymin = self.convert_yunits(ymin)
ymax = self.convert_yunits(ymax)
if not iterable(x):
x = [x]
if not iterable(ymin):
ymin = [ymin]
if not iterable(ymax):
ymax = [ymax]
x = np.ravel(x)
ymin = np.resize(ymin, x.shape)
ymax = np.resize(ymax, x.shape)
verts = [((thisx, thisymin), (thisx, thisymax))
for thisx, thisymin, thisymax in zip(x, ymin, ymax)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
if len(x) > 0:
minx = min(x)
maxx = max(x)
miny = min(min(ymin), min(ymax))
maxy = max(max(ymin), max(ymax))
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
@docstring.dedent_interpd
def eventplot(self, positions, orientation='horizontal', lineoffsets=1,
linelengths=1, linewidths=None, colors=None,
linestyles='solid', **kwargs):
"""
Plot identical parallel lines at specific positions.
Call signature::
eventplot(positions, orientation='horizontal', lineoffsets=1,
          linelengths=1, linewidths=None, colors=None,
          linestyles='solid', **kwargs)
Plot parallel lines at the given positions. positions should be a 1D
or 2D array-like object, with each row corresponding to a row or column
of lines.
This type of plot is commonly used in neuroscience for representing
neural events, where it is commonly called a spike raster, dot raster,
or raster plot.
However, it is useful in any situation where you wish to show the
timing or position of multiple sets of discrete events, such as the
arrival times of people to a business on each day of the month or the
date of hurricanes each year of the last century.
*orientation* : [ 'horizontal' | 'vertical' ]
'horizontal' : the lines will be vertical and arranged in rows
'vertical' : the lines will be horizontal and arranged in columns
*lineoffsets* :
A float or array-like containing floats.
*linelengths* :
A float or array-like containing floats.
*linewidths* :
A float or array-like containing floats.
*colors* :
must be a sequence of RGBA tuples (arbitrary color strings, etc.,
are not allowed) or a list of such sequences
*linestyles* :
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] or an array of these
values
For linelengths, linewidths, colors, and linestyles, if only a single
value is given, that value is applied to all lines. If an array-like
is given, it must have the same length as positions, and each value
will be applied to the corresponding row or column in positions.
Returns a list of :class:`matplotlib.collections.EventCollection`
objects that were added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
**Example:**
.. plot:: mpl_examples/pylab_examples/eventplot_demo.py
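A minimal usage sketch of a spike raster (``ax`` and the spike
times below are illustrative assumptions)::
>>> spikes = [[0.1, 0.5, 0.9], [0.2, 0.4], [0.3, 0.6, 0.8]]
>>> colls = ax.eventplot(spikes, lineoffsets=1, linelengths=0.8)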
"""
self._process_unit_info(xdata=positions,
ydata=[lineoffsets, linelengths],
kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
positions = self.convert_xunits(positions)
lineoffsets = self.convert_yunits(lineoffsets)
linelengths = self.convert_yunits(linelengths)
if not iterable(positions):
positions = [positions]
elif any(iterable(position) for position in positions):
positions = [np.asanyarray(position) for position in positions]
else:
positions = [np.asanyarray(positions)]
if len(positions) == 0:
return []
if not iterable(lineoffsets):
lineoffsets = [lineoffsets]
if not iterable(linelengths):
linelengths = [linelengths]
if not iterable(linewidths):
linewidths = [linewidths]
if not iterable(colors):
colors = [colors]
if hasattr(linestyles, 'lower') or not iterable(linestyles):
linestyles = [linestyles]
lineoffsets = np.asarray(lineoffsets)
linelengths = np.asarray(linelengths)
linewidths = np.asarray(linewidths)
if len(lineoffsets) == 0:
lineoffsets = [None]
if len(linelengths) == 0:
linelengths = [None]
if len(linewidths) == 0:
    linewidths = [None]
if len(colors) == 0:
colors = [None]
if len(lineoffsets) == 1 and len(positions) != 1:
lineoffsets = np.tile(lineoffsets, len(positions))
lineoffsets[0] = 0
lineoffsets = np.cumsum(lineoffsets)
if len(linelengths) == 1:
linelengths = np.tile(linelengths, len(positions))
if len(linewidths) == 1:
linewidths = np.tile(linewidths, len(positions))
if len(colors) == 1:
colors = list(colors)
colors = colors * len(positions)
if len(linestyles) == 1:
linestyles = [linestyles] * len(positions)
if len(lineoffsets) != len(positions):
raise ValueError('lineoffsets and positions are unequal sized '
'sequences')
if len(linelengths) != len(positions):
raise ValueError('linelengths and positions are unequal sized '
'sequences')
if len(linewidths) != len(positions):
raise ValueError('linewidths and positions are unequal sized '
'sequences')
if len(colors) != len(positions):
raise ValueError('colors and positions are unequal sized '
'sequences')
if len(linestyles) != len(positions):
raise ValueError('linestyles and positions are unequal sized '
'sequences')
colls = []
for position, lineoffset, linelength, linewidth, color, linestyle in \
zip(positions, lineoffsets, linelengths, linewidths,
colors, linestyles):
coll = mcoll.EventCollection(position,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
colls.append(coll)
if len(positions) > 0:
# try to get min/max
min_max = [(np.min(_p), np.max(_p)) for _p in positions
if len(_p) > 0]
# if we have any non-empty positions, try to autoscale
if len(min_max) > 0:
mins, maxes = zip(*min_max)
minpos = np.min(mins)
maxpos = np.max(maxes)
minline = (lineoffsets - linelengths).min()
maxline = (lineoffsets + linelengths).max()
if colls[0].is_horizontal():
corners = (minpos, minline), (maxpos, maxline)
else:
corners = (minline, minpos), (maxline, maxpos)
self.update_datalim(corners)
self.autoscale_view()
return colls
#### Basic plotting
@docstring.dedent_interpd
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
By default, each line is assigned a different color specified by a
'color cycle'. To change this behavior, you can edit the
axes.color_cycle rcParam.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markerfacecolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12).
See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop('scalex', True)
scaley = kwargs.pop('scaley', True)
if not self._hold:
self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
@docstring.dedent_interpd
def plot_date(self, x, y, fmt='o', tz=None, xdate=True, ydate=False,
**kwargs):
"""
Plot data that contains dates.
Call signature::
plot_date(x, y, fmt='o', tz=None, xdate=True,
ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ *None* | timezone string | :class:`tzinfo` instance]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ *True* | *False* ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ *False* | *True* ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.dates.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.dates.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.dates.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.dates.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates` for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange` for help on creating the required
floating point dates.
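A minimal usage sketch (``ax`` is assumed to be an existing Axes;
the date range is illustrative)::
>>> import datetime
>>> from matplotlib import dates
>>> days = dates.drange(datetime.datetime(2014, 1, 1),
...                     datetime.datetime(2014, 1, 8),
...                     datetime.timedelta(days=1))
>>> ax.plot_date(days, range(7), fmt='o')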
"""
if not self._hold:
self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
@docstring.dedent_interpd
def loglog(self, *args, **kwargs):
"""
Make a plot with log scaling on both the *x* and *y* axis.
Call signature::
loglog(*args, **kwargs)
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
Base of the *x*/*y* logarithm
*subsx*/*subsy*: [ *None* | sequence ]
The location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
*nonposx*/*nonposy*: ['mask' | 'clip' ]
Non-positive values in *x* or *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
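A minimal usage sketch (``ax`` is assumed to be an existing Axes)::
>>> import numpy as np
>>> x = np.logspace(0, 3, 50)
>>> ax.loglog(x, x ** 2.0, basex=10, basey=10)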
"""
if not self._hold:
self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogx(self, *args, **kwargs):
"""
Make a plot with log scaling on the *x* axis.
Call signature::
semilogx(*args, **kwargs)
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
Base of the *x* logarithm
*subsx*: [ *None* | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
*nonposx*: [ 'mask' | 'clip' ]
Non-positive values in *x* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
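A minimal usage sketch (``ax`` is assumed to be an existing Axes)::
>>> import numpy as np
>>> x = np.logspace(-2, 2, 100)
>>> ax.semilogx(x, np.sin(x), basex=10)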
"""
if not self._hold:
self.cla()
d = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogy(self, *args, **kwargs):
"""
Make a plot with log scaling on the *y* axis.
Call signature::
semilogy(*args, **kwargs)
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ *None* | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
*nonposy*: [ 'mask' | 'clip' ]
Non-positive values in *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold:
self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def acorr(self, x, **kwargs):
"""
Plot the autocorrelation of `x`.
Parameters
----------
x : sequence of scalar
hold : boolean, optional, default: True
detrend : callable, optional, default: `mlab.detrend_none`
`x` is detrended by the `detrend` callable. The default is no
detrending.
normed : boolean, optional, default: True
if True, normalize the data by the autocorrelation at the 0-th
lag.
usevlines : boolean, optional, default: True
if True, Axes.vlines is used to plot the vertical lines from the
origin to the acorr. Otherwise, Axes.plot is used.
maxlags : integer, optional, default: 10
number of lags to show. If None, will return all 2 * len(x) - 1
lags.
Returns
-------
(lags, c, line, b) : where:
- `lags` is a length ``2*maxlags+1`` lag vector.
- `c` is the ``2*maxlags+1`` auto correlation vector.
- `line` is a `~matplotlib.lines.Line2D` instance returned by
`plot`.
- `b` is the x-axis.
Other parameters
----------------
linestyle : `~matplotlib.lines.Line2D` prop, optional, default: None
Only used if usevlines is False.
marker : string, optional, default: 'o'
Notes
-----
The cross correlation is performed with :func:`numpy.correlate` with
`mode` = 2.
Examples
--------
`~matplotlib.pyplot.xcorr` is the top graph, and
`~matplotlib.pyplot.acorr` is the bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
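A minimal usage sketch (``ax`` is assumed to be an existing Axes;
the random data are illustrative)::
>>> import numpy as np
>>> x = np.random.randn(100)
>>> lags, c, line, b = ax.acorr(x, usevlines=True, maxlags=20)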
"""
return self.xcorr(x, x, **kwargs)
@docstring.dedent_interpd
def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
Plot the cross correlation between *x* and *y*.
Parameters
----------
x : sequence of scalars of length n
y : sequence of scalars of length n
hold : boolean, optional, default: True
detrend : callable, optional, default: `mlab.detrend_none`
`x` and `y` are detrended by the `detrend` callable. The default is
no detrending.
normed : boolean, optional, default: True
if True, normalize the data by the autocorrelation at the 0-th
lag.
usevlines : boolean, optional, default: True
if True, Axes.vlines is used to plot the vertical lines from the
origin to the acorr. Otherwise, Axes.plot is used.
maxlags : integer, optional, default: 10
number of lags to show. If None, will return all 2 * len(x) - 1
lags.
Returns
-------
(lags, c, line, b) : where:
- `lags` is a length ``2*maxlags+1`` lag vector.
- `c` is the ``2*maxlags+1`` cross correlation vector.
- `line` is a `~matplotlib.lines.Line2D` instance returned by
`plot`.
- `b` is the x-axis (none, if plot is used).
Other parameters
----------------
linestyle : `~matplotlib.lines.Line2D` prop, optional, default: None
Only used if usevlines is False.
marker : string, optional, default: 'o'
Notes
-----
The cross correlation is performed with :func:`numpy.correlate` with
`mode` = 2.
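A minimal usage sketch (``ax`` is assumed to be an existing Axes;
the random data are illustrative)::
>>> import numpy as np
>>> x, y = np.random.randn(2, 100)
>>> lags, c, line, b = ax.xcorr(x, y, usevlines=True, maxlags=20)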
"""
Nx = len(x)
if Nx != len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed:
c /= np.sqrt(np.dot(x, x) * np.dot(y, y))
if maxlags is None:
maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
                 'positive < %d' % Nx)
lags = np.arange(-maxlags, maxlags + 1)
c = c[Nx - 1 - maxlags:Nx + maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
"""
Make a step plot.
Call signature::
step(x, y, *args, **kwargs)
Additional keyword args to :func:`step` are the same as those
for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i+1]
If 'post', that interval has level y[i]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
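A minimal usage sketch (``ax`` is assumed to be an existing Axes)::
>>> import numpy as np
>>> x = np.arange(10)
>>> ax.step(x, x ** 2, where='mid')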
"""
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
usr_linestyle = kwargs.pop('linestyle', '')
kwargs['linestyle'] = 'steps-' + where + usr_linestyle
return self.plot(x, y, *args, **kwargs)
@docstring.dedent_interpd
def bar(self, left, height, width=0.8, bottom=None, **kwargs):
"""
Make a bar plot.
Make a bar plot with rectangles bounded by:
`left`, `left` + `width`, `bottom`, `bottom` + `height`
(left, right, bottom and top edges)
Parameters
----------
left : sequence of scalars
the x coordinates of the left sides of the bars
height : sequence of scalars
the heights of the bars
width : scalar or array-like, optional, default: 0.8
the width(s) of the bars
bottom : scalar or array-like, optional, default: None
the y coordinate(s) of the bars
color : scalar or array-like, optional
the colors of the bar faces
edgecolor : scalar or array-like, optional
the colors of the bar edges
linewidth : scalar or array-like, optional, default: None
width of bar edge(s). If None, use default
linewidth; If 0, don't draw edges.
xerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
yerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
ecolor : scalar or array-like, optional, default: None
specifies the color of errorbar(s)
capsize : integer, optional, default: 3
determines the length in points of the error bar caps
error_kw :
dictionary of kwargs to be passed to errorbar method. *ecolor* and
*capsize* may be specified here rather than as independent kwargs.
align : ['edge' | 'center'], optional, default: 'edge'
If `edge`, aligns bars by their left edges (for vertical bars) and
by their bottom edges (for horizontal bars). If `center`, interpret
the `left` argument as the coordinates of the centers of the bars.
orientation : 'vertical' | 'horizontal', optional, default: 'vertical'
The orientation of the bars.
log : boolean, optional, default: False
If true, sets the axis to be log scale
Returns
-------
`matplotlib.patches.Rectangle` instances.
Notes
-----
The optional arguments `color`, `edgecolor`, `linewidth`,
`xerr`, and `yerr` can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: `xerr` and `yerr` are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
See also
--------
barh: Plot a horizontal bar plot.
Examples
--------
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
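A minimal sketch of a two-layer stacked bar chart (``ax`` and the
values below are illustrative assumptions)::
>>> left = [0, 1, 2]
>>> lower = [3, 5, 2]
>>> ax.bar(left, lower, width=0.8, color='b')
>>> ax.bar(left, [1, 2, 4], width=0.8, bottom=lower, color='r')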
"""
if not self._hold:
self.cla()
color = kwargs.pop('color', None)
edgecolor = kwargs.pop('edgecolor', None)
linewidth = kwargs.pop('linewidth', None)
# Because xerr and yerr will be passed to errorbar,
# most dimension checking and processing will be left
# to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', dict())
ecolor = kwargs.pop('ecolor', None)
capsize = kwargs.pop('capsize', 3)
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
align = kwargs.pop('align', 'edge')
orientation = kwargs.pop('orientation', 'vertical')
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log', nonposy='clip')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
adjust_ylim = True
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log', nonposx='clip')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
adjust_xlim = True
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('invalid orientation: %s' % orientation)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) == 0: # until to_rgba_array is changed
color = [[0, 0, 0, 0]]
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) == 0: # until to_rgba_array is changed
edgecolor = [[0, 0, 0, 0]]
if len(edgecolor) < nbars:
edgecolor *= nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left) == nbars, ("incompatible sizes: argument 'left' must "
"be length %d or scalar" % nbars)
assert len(height) == nbars, ("incompatible sizes: argument 'height' "
"must be length %d or scalar" %
nbars)
assert len(width) == nbars, ("incompatible sizes: argument 'width' "
"must be length %d or scalar" %
nbars)
assert len(bottom) == nbars, ("incompatible sizes: argument 'bottom' "
"must be length %d or scalar" %
nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
left = self.convert_xunits(left)
width = self.convert_xunits(width)
if xerr is not None:
xerr = self.convert_xunits(xerr)
if self.yaxis is not None:
bottom = self.convert_yunits(bottom)
height = self.convert_yunits(height)
if yerr is not None:
yerr = self.convert_yunits(yerr)
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i] / 2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i] / 2.
for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h < 0:
b += h
h = abs(h)
if w < 0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label='_nolegend_'
)
r.update(kwargs)
r.get_path()._interpolation_steps = 100
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l + 0.5 * w for l, w in zip(left, width)]
y = [b + h for b, h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l + w for l, w in zip(left, width)]
y = [b + 0.5 * h for b, h in zip(bottom, height)]
if "label" not in error_kw:
error_kw["label"] = '_nolegend_'
errorbar = self.errorbar(x, y,
yerr=yerr, xerr=xerr,
fmt='none', **error_kw)
else:
errorbar = None
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin([w for w in width if w > 0])
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin * 0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin([h for h in height if h > 0])
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin * 0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
bar_container = BarContainer(patches, errorbar, label=label)
self.add_container(bar_container)
return bar_container
@docstring.dedent_interpd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
Make a horizontal bar plot.
Make a horizontal bar plot with rectangles bounded by:
`left`, `left` + `width`, `bottom`, `bottom` + `height`
(left, right, bottom and top edges)
`bottom`, `width`, `height`, and `left` can be either scalars
or sequences
Parameters
----------
bottom : scalar or array-like
the y coordinate(s) of the bars
width : scalar or array-like
the width(s) of the bars
height : sequence of scalars, optional, default: 0.8
the heights of the bars
left : sequence of scalars
the x coordinates of the left sides of the bars
Returns
-------
`matplotlib.patches.Rectangle` instances.
Other parameters
----------------
color : scalar or array-like, optional
the colors of the bars
edgecolor : scalar or array-like, optional
the colors of the bar edges
linewidth : scalar or array-like, optional, default: None
width of bar edge(s). If None, use default
linewidth; If 0, don't draw edges.
xerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
yerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
ecolor : scalar or array-like, optional, default: None
specifies the color of errorbar(s)
capsize : integer, optional, default: 3
determines the length in points of the error bar caps
error_kw :
dictionary of kwargs to be passed to errorbar method. `ecolor` and
`capsize` may be specified here rather than as independent kwargs.
align : ['edge' | 'center'], optional, default: 'edge'
If `edge`, aligns bars by their left edges (for vertical bars) and
by their bottom edges (for horizontal bars). If `center`, interpret
the `left` argument as the coordinates of the centers of the bars.
orientation : 'vertical' | 'horizontal', optional, default: 'vertical'
The orientation of the bars.
log : boolean, optional, default: False
If true, sets the axis to be log scale
Notes
-----
The optional arguments `color`, `edgecolor`, `linewidth`,
`xerr`, and `yerr` can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: `xerr` and `yerr` are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
See also
--------
bar: Plot a vertical bar plot.
"""
patches = self.bar(left=left, height=height, width=width,
bottom=bottom, orientation='horizontal', **kwargs)
return patches
@docstring.dedent_interpd
def broken_barh(self, xranges, yrange, **kwargs):
"""
Plot horizontal bars.
Call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, i.e.,::
facecolors = 'black'
or a sequence of arguments for the various bars, i.e.,::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
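A minimal usage sketch (``ax`` is assumed to be an existing Axes)::
>>> ax.broken_barh([(10, 5), (20, 3)], (0, 1),
...                facecolors=('blue', 'red'))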
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
def stem(self, *args, **kwargs):
"""
Create a stem plot.
Call signatures::
stem(y, linefmt='b-', markerfmt='bo', basefmt='r-')
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
If no *x* values are provided, the default is (0, 1, ..., len(y) - 1)
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
This
`document <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/stem_plot.py
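A minimal usage sketch (``ax`` is assumed to be an existing Axes;
the data are illustrative)::
>>> import numpy as np
>>> x = np.linspace(0.1, 2 * np.pi, 10)
>>> markerline, stemlines, baseline = ax.stem(
...     x, np.cos(x), linefmt='b-', markerfmt='bo', basefmt='r-')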
"""
remember_hold = self._hold
if not self._hold:
self.cla()
self.hold(True)
# Assume there's at least one data array
y = np.asarray(args[0])
args = args[1:]
# Try a second one
try:
second = np.asarray(args[0], dtype=np.float)
x, y = y, second
args = args[1:]
except (IndexError, ValueError):
# The second array doesn't make sense, or it doesn't exist
second = np.arange(len(y))
x = second
# Popping some defaults
try:
linefmt = kwargs.pop('linefmt', args[0])
except IndexError:
linefmt = kwargs.pop('linefmt', 'b-')
try:
markerfmt = kwargs.pop('markerfmt', args[1])
except IndexError:
markerfmt = kwargs.pop('markerfmt', 'bo')
try:
basefmt = kwargs.pop('basefmt', args[2])
except IndexError:
basefmt = kwargs.pop('basefmt', 'r-')
bottom = kwargs.pop('bottom', None)
label = kwargs.pop('label', None)
markerline, = self.plot(x, y, markerfmt, label="_nolegend_")
if bottom is None:
bottom = 0
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx, thisx], [bottom, thisy], linefmt,
label="_nolegend_")
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [bottom, bottom],
basefmt, label="_nolegend_")
self.hold(remember_hold)
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_container(stem_container)
return stem_container
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1,
startangle=None, radius=None, counterclock=True,
wedgeprops=None, textprops=None):
r"""
Plot a pie chart.
Call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None,
counterclock=True, wedgeprops=None, textprops=None)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized. The wedges are plotted counterclockwise,
by default starting from the x-axis.
Keyword arguments:
*explode*: [ *None* | len(x) sequence ]
If not *None*, is a ``len(x)`` array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ *None* | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ *None* | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ *None* | format string | format function ]
If not *None*, is a string or function used to label the wedges
with their numeric value. The label will be placed inside the
wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ *False* | *True* ]
Draw a shadow beneath the pie.
*startangle*: [ *None* | Offset angle ]
If not *None*, rotates the start of the pie chart by *angle*
degrees counterclockwise from the x-axis.
*radius*: [ *None* | scalar ]
The radius of the pie, if *radius* is *None* it will be set to 1.
*counterclock*: [ *False* | *True* ]
Specify fractions direction, clockwise or counterclockwise.
*wedgeprops*: [ *None* | dict of key value pairs ]
Dict of arguments passed to the wedge objects making the pie.
For example, you can pass in wedgeprops = { 'linewidth' : 3 }
to set the width of the wedge border lines equal to 3.
For more details, look at the doc/arguments of the wedge object.
By default `clip_on=False`.
*textprops*: [ *None* | dict of key value pairs ]
Dict of arguments to pass to the text objects.
The pie chart will probably look best if the figure and axes are
square, or the Axes aspect is equal. e.g.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
or::
axes(aspect=1)
Return value:
If *autopct* is *None*, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
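A minimal usage sketch (``ax`` is assumed to be an existing Axes;
the fractions and labels are illustrative)::
>>> ax.set_aspect('equal')
>>> patches, texts, autotexts = ax.pie(
...     [15, 30, 45, 10], labels=('Frogs', 'Hogs', 'Dogs', 'Logs'),
...     autopct='%1.1f%%', shadow=True, startangle=90)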
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx > 1:
x = np.divide(x, sx)
if labels is None:
labels = [''] * len(x)
if explode is None:
explode = [0] * len(x)
assert(len(x) == len(labels))
assert(len(x) == len(explode))
if colors is None:
colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0, 0
if radius is None:
radius = 1
# Starting theta1 is the start fraction of the circle
if startangle is None:
theta1 = 0
else:
theta1 = startangle / 360.0
# set default values in wedge_prop
if wedgeprops is None:
wedgeprops = {}
if 'clip_on' not in wedgeprops:
wedgeprops['clip_on'] = False
if textprops is None:
textprops = {}
if 'clip_on' not in textprops:
textprops['clip_on'] = False
texts = []
slices = []
autotexts = []
i = 0
for frac, label, expl in cbook.safezip(x, labels, explode):
x, y = center
theta2 = (theta1 + frac) if counterclock else (theta1 - frac)
thetam = 2 * math.pi * 0.5 * (theta1 + theta2)
x += expl * math.cos(thetam)
y += expl * math.sin(thetam)
w = mpatches.Wedge((x, y), radius, 360. * min(theta1, theta2),
360. * max(theta1, theta2),
facecolor=colors[i % len(colors)],
**wedgeprops)
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02)
shad.set_zorder(0.9 * w.get_zorder())
shad.set_label('_nolegend_')
self.add_patch(shad)
xt = x + labeldistance * radius * math.cos(thetam)
yt = y + labeldistance * radius * math.sin(thetam)
label_alignment = 'left' if xt > 0 else 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center',
**textprops)
texts.append(t)
if autopct is not None:
xt = x + pctdistance * radius * math.cos(thetam)
yt = y + pctdistance * radius * math.sin(thetam)
if is_string_like(autopct):
s = autopct % (100. * frac)
elif six.callable(autopct):
s = autopct(100. * frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center',
**textprops)
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None:
return slices, texts
else:
return slices, texts, autotexts
@docstring.dedent_interpd
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
**kwargs):
"""
Plot an errorbar graph.
Call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1,
capthick=None)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
If a scalar number, len(N) array-like object, or an Nx1
array-like object, errorbars are drawn at +/-value relative
to the data.
If a sequence of shape 2xN, errorbars are drawn at -row1
and +row2 relative to the data.
*fmt*: [ '' | 'none' | plot format string ]
The plot format symbol. If *fmt* is 'none' (case-insensitive),
only the errorbars are plotted. This is used for adding
errorbars to a bar plot, for example. Default is '',
an empty plot format string; properties are
then identical to the defaults for :meth:`plot`.
*ecolor*: [ *None* | mpl color ]
A matplotlib color arg which gives the color the errorbar lines;
if *None*, use the color of the line connecting the markers.
*elinewidth*: scalar
The linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
The length of the error bar caps in points
*capthick*: scalar
An alias kwarg to *markeredgewidth* (a.k.a. - *mew*). This
setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if *mew* or *markeredgewidth* are given,
then they will over-ride *capthick*. This may change in future
releases.
*barsabove*: [ *True* | *False* ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims* / *uplims* / *xlolims* / *xuplims*: [ *False* | *True* ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*. To use limits with inverted
axes, :meth:`set_xlim` or :meth:`set_ylim` must be called
before :meth:`errorbar`.
*errorevery*: positive integer
subsamples the errorbars. e.g., if errorevery=5, errorbars for
every 5-th datapoint will be plotted. The data plot itself still
shows all data points.
All other keyword arguments are passed on to the plot command for the
markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Returns (*plotline*, *caplines*, *barlinecols*):
*plotline*: :class:`~matplotlib.lines.Line2D` instance
*x*, *y* plot markers and/or line
*caplines*: list of error bar cap
:class:`~matplotlib.lines.Line2D` instances
*barlinecols*: list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/statistics/errorbar_demo.py
"""
if errorevery < 1:
raise ValueError(
'errorevery has to be a strictly positive integer')
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold:
self.cla()
holdstate = self._hold
self._hold = True
if fmt is None:
fmt = 'none'
msg = ('Use of None object as fmt keyword argument to '
+ 'suppress plotting of data values is deprecated '
+ 'since 1.4; use the string "none" instead.')
warnings.warn(msg, mplDeprecation, stacklevel=1)
plot_line = (fmt.lower() != 'none')
label = kwargs.pop("label", None)
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr] * len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr] * len(y)
l0 = None
# Instead of using zorder, the line plot is being added
# either here, or after all the errorbar plot elements.
if barsabove and plot_line:
l0, = self.plot(x, y, fmt, label="_nolegend_", **kwargs)
barcols = []
caplines = []
lines_kw = {'label': '_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
for key in ('linewidth', 'lw'):
if key in kwargs:
lines_kw[key] = kwargs[key]
for key in ('transform', 'alpha', 'zorder'):
if key in kwargs:
lines_kw[key] = kwargs[key]
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims] * len(x), bool)
else:
lolims = np.asarray(lolims, bool)
if not iterable(uplims):
uplims = np.array([uplims] * len(x), bool)
else:
uplims = np.asarray(uplims, bool)
if not iterable(xlolims):
xlolims = np.array([xlolims] * len(x), bool)
else:
xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims):
xuplims = np.array([xuplims] * len(x), bool)
else:
xuplims = np.asarray(xuplims, bool)
everymask = np.arange(len(x)) % errorevery == 0
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs) == len(ys)
assert len(xs) == len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
plot_kw = {'label': '_nolegend_'}
if capsize > 0:
plot_kw['ms'] = 2. * capsize
if capthick is not None:
# 'mew' has higher priority, I believe,
# if both 'mew' and 'markeredgewidth' exists.
# So, save capthick to markeredgewidth so that
# explicitly setting mew or markeredgewidth will
# over-write capthick.
plot_kw['markeredgewidth'] = capthick
# For backwards-compat, allow explicit setting of
# 'mew' or 'markeredgewidth' to over-ride capthick.
for key in ('markeredgewidth', 'mew', 'transform', 'alpha', 'zorder'):
if key in kwargs:
plot_kw[key] = kwargs[key]
if xerr is not None:
if (iterable(xerr) and len(xerr) == 2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx - thiserr for (thisx, thiserr)
in cbook.safezip(x, xerr[0])]
right = [thisx + thiserr for (thisx, thiserr)
in cbook.safezip(x, xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx - thiserr for (thisx, thiserr)
in cbook.safezip(x, xerr)]
right = [thisx + thiserr for (thisx, thiserr)
in cbook.safezip(x, xerr)]
# select points without upper/lower limits in x and
# draw normal errorbars for these points
noxlims = ~(xlolims | xuplims)
if noxlims.any():
yo, _ = xywhere(y, right, noxlims & everymask)
lo, ro = xywhere(left, right, noxlims & everymask)
barcols.append(self.hlines(yo, lo, ro, **lines_kw))
if capsize > 0:
caplines.extend(self.plot(lo, yo, 'k|', **plot_kw))
caplines.extend(self.plot(ro, yo, 'k|', **plot_kw))
if xlolims.any():
yo, _ = xywhere(y, right, xlolims & everymask)
lo, ro = xywhere(x, right, xlolims & everymask)
barcols.append(self.hlines(yo, lo, ro, **lines_kw))
rightup, yup = xywhere(right, y, xlolims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETLEFT
else:
marker = mlines.CARETRIGHT
caplines.extend(
self.plot(rightup, yup, ls='None', marker=marker,
**plot_kw))
if capsize > 0:
xlo, ylo = xywhere(x, y, xlolims & everymask)
caplines.extend(self.plot(xlo, ylo, 'k|', **plot_kw))
if xuplims.any():
yo, _ = xywhere(y, right, xuplims & everymask)
lo, ro = xywhere(left, x, xuplims & everymask)
barcols.append(self.hlines(yo, lo, ro, **lines_kw))
leftlo, ylo = xywhere(left, y, xuplims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETRIGHT
else:
marker = mlines.CARETLEFT
caplines.extend(
self.plot(leftlo, ylo, ls='None', marker=marker,
**plot_kw))
if capsize > 0:
xup, yup = xywhere(x, y, xuplims & everymask)
caplines.extend(self.plot(xup, yup, 'k|', **plot_kw))
if yerr is not None:
if (iterable(yerr) and len(yerr) == 2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy - thiserr for (thisy, thiserr)
in cbook.safezip(y, yerr[0])]
upper = [thisy + thiserr for (thisy, thiserr)
in cbook.safezip(y, yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy - thiserr for (thisy, thiserr)
in cbook.safezip(y, yerr)]
upper = [thisy + thiserr for (thisy, thiserr)
in cbook.safezip(y, yerr)]
# select points without upper/lower limits in y and
# draw normal errorbars for these points
noylims = ~(lolims | uplims)
if noylims.any():
xo, _ = xywhere(x, lower, noylims & everymask)
lo, uo = xywhere(lower, upper, noylims & everymask)
barcols.append(self.vlines(xo, lo, uo, **lines_kw))
if capsize > 0:
caplines.extend(self.plot(xo, lo, 'k_', **plot_kw))
caplines.extend(self.plot(xo, uo, 'k_', **plot_kw))
if lolims.any():
xo, _ = xywhere(x, lower, lolims & everymask)
lo, uo = xywhere(y, upper, lolims & everymask)
barcols.append(self.vlines(xo, lo, uo, **lines_kw))
xup, upperup = xywhere(x, upper, lolims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETDOWN
else:
marker = mlines.CARETUP
caplines.extend(
self.plot(xup, upperup, ls='None', marker=marker,
**plot_kw))
if capsize > 0:
xlo, ylo = xywhere(x, y, lolims & everymask)
caplines.extend(self.plot(xlo, ylo, 'k_', **plot_kw))
if uplims.any():
xo, _ = xywhere(x, lower, uplims & everymask)
lo, uo = xywhere(lower, y, uplims & everymask)
barcols.append(self.vlines(xo, lo, uo, **lines_kw))
xlo, lowerlo = xywhere(x, lower, uplims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETUP
else:
marker = mlines.CARETDOWN
caplines.extend(
self.plot(xlo, lowerlo, ls='None', marker=marker,
**plot_kw))
if capsize > 0:
xup, yup = xywhere(x, y, uplims & everymask)
caplines.extend(self.plot(xup, yup, 'k_', **plot_kw))
if not barsabove and plot_line:
l0, = self.plot(x, y, fmt, **kwargs)
if ecolor is None:
if l0 is None:
ecolor = six.next(self._get_lines.color_cycle)
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
self._hold = holdstate
errorbar_container = ErrorbarContainer((l0, tuple(caplines),
tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label)
self.containers.append(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
def boxplot(self, x, notch=False, sym=None, vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=False, showmeans=False, showcaps=True,
showbox=True, showfliers=True, boxprops=None, labels=None,
flierprops=None, medianprops=None, meanprops=None,
capprops=None, whiskerprops=None, manage_xticks=True):
"""
Make a box and whisker plot.
Call signature::
boxplot(self, x, notch=False, sym='b+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=False, showmeans=False, showcaps=True,
showbox=True, showfliers=True, boxprops=None, labels=None,
flierprops=None, medianprops=None, meanprops=None,
capprops=None, whiskerprops=None, manage_xticks=True):
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Parameters
----------
x : Array or a sequence of vectors.
The input data.
notch : bool, default = False
If False, produces a rectangular box plot.
If True, will produce a notched box plot
sym : str or None, default = None
The default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
If `None`, then the fliers default to 'b+'. If you want more
control, use the *flierprops* kwarg.
vert : bool, default = True
If True (default), makes the boxes vertical.
If False, makes horizontal boxes.
whis : float, sequence (default = 1.5) or string
As a float, determines the reach of the whiskers past the first
and third quartiles (e.g., Q3 + whis*IQR, IQR = interquartile
range, Q3-Q1). Beyond the whiskers, data are considered outliers
and are plotted as individual points. Set this to an unreasonably
high value to force the whiskers to show the min and max values.
Alternatively, set this to an ascending sequence of percentile
(e.g., [5, 95]) to set the whiskers at specific percentiles of
the data. Finally, *whis* can be the string 'range' to force the
whiskers to the min and max of the data. In the edge case that
the 25th and 75th percentiles are equivalent, *whis* will be
automatically set to 'range'.
bootstrap : None (default) or integer
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If bootstrap==None,
no bootstrapping is performed, and notches are calculated
using a Gaussian-based asymptotic approximation (see McGill, R.,
Tukey, J.W., and Larsen, W.A., 1978, and Kendall and Stuart,
1967). Otherwise, bootstrap specifies the number of times to
bootstrap the median to determine its 95% confidence intervals.
Values between 1000 and 10000 are recommended.
usermedians : array-like or None (default)
An array or sequence whose first dimension (or length) is
compatible with *x*. This overrides the medians computed by
matplotlib for each element of *usermedians* that is not None.
When an element of *usermedians* == None, the median will be
computed by matplotlib as normal.
conf_intervals : array-like or None (default)
Array or sequence whose first dimension (or length) is compatible
with *x* and whose second dimension is 2. When the current element
of *conf_intervals* is not None, the notch locations computed by
matplotlib are overridden (assuming notch is True). When an
element of *conf_intervals* is None, boxplot computes notches using
the method specified by the other kwargs (e.g., *bootstrap*).
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the boxes. The ticks and limits
are automatically set to match the positions.
widths : array-like, default = 0.5
Either a scalar or a vector and sets the width of each box. The
default is 0.5, or ``0.15*(distance between extreme positions)``
if that is smaller.
labels : sequence or None (default)
Labels for each dataset. Length must be compatible with
dimensions of *x*
patch_artist : bool, default = False
If False, produces boxes with the Line2D artist.
If True, produces boxes with the Patch artist.
showmeans : bool, default = False
If True, will toggle on the rendering of the means
showcaps : bool, default = True
If True, will toggle on the rendering of the caps
showbox : bool, default = True
If True, will toggle on the rendering of the box
showfliers : bool, default = True
If True, will toggle on the rendering of the fliers
boxprops : dict or None (default)
If provided, will set the plotting style of the boxes
whiskerprops : dict or None (default)
If provided, will set the plotting style of the whiskers
capprops : dict or None (default)
If provided, will set the plotting style of the caps
flierprops : dict or None (default)
If provided, will set the plotting style of the fliers
medianprops : dict or None (default)
If provided, will set the plotting style of the medians
meanprops : dict or None (default)
If provided, will set the plotting style of the means
meanline : bool, default = False
If True (and *showmeans* is True), will try to render the mean
as a line spanning the full width of the box according to
*meanprops*. Not recommended if *shownotches* is also True.
Otherwise, means will be shown as points.
Returns
-------
result : dict
A dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created. That dictionary has the following keys
(assuming vertical boxplots):
- boxes: the main body of the boxplot showing the quartiles
and the median's confidence intervals if enabled.
- medians: horizontal lines at the median of each box.
- whiskers: the vertical lines extending to the most extreme,
non-outlier data points.
- caps: the horizontal lines at the ends of the whiskers.
- fliers: points representing data that extend beyond the
whiskers (outliers).
- means: points or lines representing the means.
Examples
--------
.. plot:: mpl_examples/statistics/boxplot_demo.py
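A minimal usage sketch (the variable names below are illustrative,
not part of the API); the returned dictionary can be used to
restyle components after plotting::
    import numpy as np
    import matplotlib.pyplot as plt
    # three synthetic samples with increasing spread
    data = [np.random.normal(0, std, 100) for std in (1, 2, 3)]
    fig, ax = plt.subplots()
    result = ax.boxplot(data, notch=True, labels=['a', 'b', 'c'])
    for line in result['medians']:
        line.set_linewidth(2)  # thicken the median lines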
"""
bxpstats = cbook.boxplot_stats(x, whis=whis, bootstrap=bootstrap,
labels=labels)
# make sure we have a dictionary
if flierprops is None:
flierprops = dict()
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False', but nonetheless handle it here to keep
# backward compatibility
if sym == '':
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle='none', marker='',
color='none')
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops['marker'] = marker
# if we have a color, use it
if color is not None:
# assume that if a color is passed in, the user wants a
# filled symbol; users who want more control should use
# flierprops directly
flierprops['color'] = color
# replace medians if necessary:
if usermedians is not None:
if (len(np.ravel(usermedians)) != len(bxpstats) or
np.shape(usermedians)[0] != len(bxpstats)):
medmsg = 'usermedians length not compatible with x'
raise ValueError(medmsg)
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats['med'] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
raise ValueError('conf_intervals length not '
'compatible with x')
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError('each confidence interval must '
'have two values')
else:
if ci[0] is not None:
stats['cilo'] = ci[0]
if ci[1] is not None:
stats['cihi'] = ci[1]
artists = self.bxp(bxpstats, positions=positions, widths=widths,
vert=vert, patch_artist=patch_artist,
shownotches=notch, showmeans=showmeans,
showcaps=showcaps, showbox=showbox,
boxprops=boxprops, flierprops=flierprops,
medianprops=medianprops, meanprops=meanprops,
meanline=meanline, showfliers=showfliers,
capprops=capprops, whiskerprops=whiskerprops,
manage_xticks=manage_xticks)
return artists
def bxp(self, bxpstats, positions=None, widths=None, vert=True,
patch_artist=False, shownotches=False, showmeans=False,
showcaps=True, showbox=True, showfliers=True,
boxprops=None, whiskerprops=None, flierprops=None,
medianprops=None, capprops=None, meanprops=None,
meanline=False, manage_xticks=True):
"""
Drawing function for box and whisker plots.
Call signature::
bxp(bxpstats, positions=None, widths=None, vert=True,
patch_artist=False, shownotches=False, showmeans=False,
showcaps=True, showbox=True, showfliers=True,
boxprops=None, whiskerprops=None, flierprops=None,
medianprops=None, capprops=None, meanprops=None,
meanline=False, manage_xticks=True)
Make a box and whisker plot for each set of statistics in
*bxpstats*. The box extends from the lower to upper quartile
values of the data, with a line at the median. The whiskers
extend from the box to show the range of the data. Flier
points are those past the end of the whiskers.
Parameters
----------
bxpstats : list of dicts
A list of dictionaries containing stats for each boxplot.
Required keys are:
- ``med``: The median (scalar float).
- ``q1``: The first quartile (25th percentile) (scalar
float).
- ``q3``: The third quartile (75th percentile) (scalar
float).
- ``whislo``: Lower bound of the lower whisker (scalar
float).
- ``whishi``: Upper bound of the upper whisker (scalar
float).
Optional keys are:
- ``mean``: The mean (scalar float). Needed if
``showmeans=True``.
- ``fliers``: Data beyond the whiskers (sequence of floats).
Needed if ``showfliers=True``.
- ``cilo`` & ``cihi``: Lower and upper confidence intervals
about the median. Needed if ``shownotches=True``.
- ``label``: Name of the dataset (string). If available,
this will be used as a tick label for the boxplot
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the boxes. The ticks and limits
are automatically set to match the positions.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the width of each
box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
vert : bool, default = True
If `True` (default), makes the boxes vertical. If `False`,
makes horizontal boxes.
patch_artist : bool, default = False
If `False` produces boxes with the
`~matplotlib.lines.Line2D` artist. If `True` produces boxes
with the `~matplotlib.patches.Patch` artist.
shownotches : bool, default = False
If `False` (default), produces a rectangular box plot.
If `True`, will produce a notched box plot.
showmeans : bool, default = False
If `True`, will toggle on the rendering of the means
showcaps : bool, default = True
If `True`, will toggle on the rendering of the caps
showbox : bool, default = True
If `True`, will toggle on the rendering of the box
showfliers : bool, default = True
If `True`, will toggle on the rendering of the fliers
boxprops : dict or None (default)
If provided, will set the plotting style of the boxes
whiskerprops : dict or None (default)
If provided, will set the plotting style of the whiskers
capprops : dict or None (default)
If provided, will set the plotting style of the caps
flierprops : dict or None (default)
If provided, will set the plotting style of the fliers
medianprops : dict or None (default)
If provided, will set the plotting style of the medians
meanprops : dict or None (default)
If provided, will set the plotting style of the means
meanline : bool, default = False
If `True` (and *showmeans* is `True`), will try to render the mean
as a line spanning the full width of the box according to
*meanprops*. Not recommended if *shownotches* is also True.
Otherwise, means will be shown as points.
manage_xticks : bool, default = True
If `True`, the function will adjust the xlim and xtick locations.
Returns
-------
result : dict
A dictionary mapping each component of the boxplot to a list
of the :class:`matplotlib.lines.Line2D` instances
created. That dictionary has the following keys (assuming
vertical boxplots):
- ``boxes``: the main body of the boxplot showing the
quartiles and the median's confidence intervals if
enabled.
- ``medians``: horizontal lines at the median of each box.
- ``whiskers``: the vertical lines extending to the most
extreme, non-outlier data points.
- ``caps``: the horizontal lines at the ends of the
whiskers.
- ``fliers``: points representing data that extend beyond
the whiskers (fliers).
- ``means``: points or lines representing the means.
Examples
--------
.. plot:: mpl_examples/statistics/bxp_demo.py
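A minimal sketch of feeding `bxp` precomputed statistics from
:func:`~matplotlib.cbook.boxplot_stats` (the variable names are
illustrative)::
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cbook
    # compute the five-number summaries outside of the Axes
    data = np.random.lognormal(mean=1.0, sigma=0.5, size=(100, 3))
    stats = cbook.boxplot_stats(data, labels=['a', 'b', 'c'])
    fig, ax = plt.subplots()
    ax.bxp(stats, showmeans=True)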
"""
# lists of artists to be output
whiskers = []
caps = []
boxes = []
medians = []
means = []
fliers = []
# empty list of xticklabels
datalabels = []
# translates between line2D and patch linestyles
linestyle_map = {
'solid': '-',
'dashed': '--',
'dashdot': '-.',
'dotted': ':'
}
# box properties
if patch_artist:
final_boxprops = dict(linestyle='solid', edgecolor='black',
facecolor='white', linewidth=1)
else:
final_boxprops = dict(linestyle='-', color='blue')
if boxprops is not None:
final_boxprops.update(boxprops)
# other (cap, whisker) properties
final_whiskerprops = dict(
linestyle='--',
color='blue',
)
final_capprops = dict(
linestyle='-',
color='black',
)
if capprops is not None:
final_capprops.update(capprops)
if whiskerprops is not None:
final_whiskerprops.update(whiskerprops)
# set up the default flier properties
final_flierprops = dict(linestyle='none', marker='+', color='blue')
# flier (outlier) properties
if flierprops is not None:
final_flierprops.update(flierprops)
# median line properties
final_medianprops = dict(linestyle='-', color='red')
if medianprops is not None:
final_medianprops.update(medianprops)
# mean (line or point) properties
if meanline:
final_meanprops = dict(linestyle='--', color='black')
else:
final_meanprops = dict(linestyle='none', markerfacecolor='red',
marker='s')
if meanprops is not None:
final_meanprops.update(meanprops)
def to_vc(xs, ys):
# convert arguments to verts and codes
verts = []
for xi, yi in zip(xs, ys):
verts.append((xi, yi))
verts.append((0, 0)) # ignored
codes = [mpath.Path.MOVETO] + \
[mpath.Path.LINETO] * (len(verts) - 2) + \
[mpath.Path.CLOSEPOLY]
return verts, codes
def patch_list(xs, ys, **kwargs):
verts, codes = to_vc(xs, ys)
path = mpath.Path(verts, codes)
patch = mpatches.PathPatch(path, **kwargs)
self.add_artist(patch)
return [patch]
# vertical or horizontal plot?
if vert:
def doplot(*args, **kwargs):
return self.plot(*args, **kwargs)
def dopatch(xs, ys, **kwargs):
return patch_list(xs, ys, **kwargs)
else:
def doplot(*args, **kwargs):
shuffled = []
for i in xrange(0, len(args), 2):
shuffled.extend([args[i + 1], args[i]])
return self.plot(*shuffled, **kwargs)
def dopatch(xs, ys, **kwargs):
xs, ys = ys, xs # flip X, Y
return patch_list(xs, ys, **kwargs)
# input validation
N = len(bxpstats)
datashape_message = ("List of boxplot statistics and `{0}` "
"values must have same the length")
# check position
if positions is None:
positions = list(xrange(1, N + 1))
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
# width
if widths is None:
distance = max(positions) - min(positions)
widths = [min(0.15 * max(distance, 1.0), 0.5)] * N
elif np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
# check and save the `hold` state of the current axes
if not self._hold:
self.cla()
holdStatus = self._hold
for pos, width, stats in zip(positions, widths, bxpstats):
# try to find a new label
datalabels.append(stats.get('label', pos))
# fliers coords
flier_x = np.ones(len(stats['fliers'])) * pos
flier_y = stats['fliers']
# whisker coords
whisker_x = np.ones(2) * pos
whiskerlo_y = np.array([stats['q1'], stats['whislo']])
whiskerhi_y = np.array([stats['q3'], stats['whishi']])
# cap coords
cap_left = pos - width * 0.25
cap_right = pos + width * 0.25
cap_x = np.array([cap_left, cap_right])
cap_lo = np.ones(2) * stats['whislo']
cap_hi = np.ones(2) * stats['whishi']
# box and median coords
box_left = pos - width * 0.5
box_right = pos + width * 0.5
med_y = [stats['med'], stats['med']]
# notched boxes
if shownotches:
box_x = [box_left, box_right, box_right, cap_right, box_right,
box_right, box_left, box_left, cap_left, box_left,
box_left]
box_y = [stats['q1'], stats['q1'], stats['cilo'],
stats['med'], stats['cihi'], stats['q3'],
stats['q3'], stats['cihi'], stats['med'],
stats['cilo'], stats['q1']]
med_x = cap_x
# plain boxes
else:
box_x = [box_left, box_right, box_right, box_left, box_left]
box_y = [stats['q1'], stats['q1'], stats['q3'], stats['q3'],
stats['q1']]
med_x = [box_left, box_right]
# maybe draw the box:
if showbox:
if patch_artist:
boxes.extend(dopatch(box_x, box_y, **final_boxprops))
else:
boxes.extend(doplot(box_x, box_y, **final_boxprops))
# draw the whiskers
whiskers.extend(doplot(
whisker_x, whiskerlo_y, **final_whiskerprops
))
whiskers.extend(doplot(
whisker_x, whiskerhi_y, **final_whiskerprops
))
# maybe draw the caps:
if showcaps:
caps.extend(doplot(cap_x, cap_lo, **final_capprops))
caps.extend(doplot(cap_x, cap_hi, **final_capprops))
# draw the medians
medians.extend(doplot(med_x, med_y, **final_medianprops))
# maybe draw the means
if showmeans:
if meanline:
means.extend(doplot(
[box_left, box_right], [stats['mean'], stats['mean']],
**final_meanprops
))
else:
means.extend(doplot(
[pos], [stats['mean']], **final_meanprops
))
# maybe draw the fliers
if showfliers:
fliers.extend(doplot(
flier_x, flier_y, **final_flierprops
))
# fix our axes/ticks up a little
if vert:
setticks = self.set_xticks
setlim = self.set_xlim
setlabels = self.set_xticklabels
else:
setticks = self.set_yticks
setlim = self.set_ylim
setlabels = self.set_yticklabels
if manage_xticks:
newlimits = min(positions) - 0.5, max(positions) + 0.5
setlim(newlimits)
setticks(positions)
setlabels(datalabels)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers, means=means)
@docstring.dedent_interpd
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
verts=None, **kwargs):
"""
Make a scatter plot of x vs y, where x and y are sequence like objects
of the same lengths.
Parameters
----------
x, y : array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, ), optional, default: 20
size in points^2.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs
(see below). Note that `c` should not be a single numeric RGB or
RGBA sequence because that is indistinguishable from an array of
values to be colormapped. `c` can be a 2-D array in which the
rows are RGB or RGBA, however.
marker : `~matplotlib.markers.MarkerStyle`, optional, default: 'o'
See `~matplotlib.markers` for more information on the different
styles of markers scatter supports.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
A `~matplotlib.colors.Colormap` instance or registered name.
`cmap` is only used if `c` is an array of floats. If None,
defaults to rc `image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `~matplotlib.colors.Normalize` instance is used to scale
luminance data to 0, 1. `norm` is only used if `c` is an array of
floats. If `None`, use the default :func:`normalize`.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
linewidths : scalar or array_like, optional, default: None
If None, defaults to (lines.linewidth,). Note that this is a
tuple, and if you set the linewidths argument you must set it as a
sequence of floats, as required by
`~matplotlib.collections.RegularPolyCollection`.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.Collection` properties
Notes
-----
Any or all of `x`, `y`, `s`, and `c` may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Examples
--------
.. plot:: mpl_examples/shapes_and_collections/scatter_demo.py
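A minimal color-mapping sketch (variable names are illustrative);
passing a float array as `c` triggers the cmap/norm machinery
described above::
    import numpy as np
    import matplotlib.pyplot as plt
    x, y = np.random.rand(2, 50)
    c = np.hypot(x, y)  # float values to be colormapped
    fig, ax = plt.subplots()
    paths = ax.scatter(x, y, c=c, s=80, alpha=0.8)
    fig.colorbar(paths, ax=ax)  # the PathCollection is the mappable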
"""
if not self._hold:
self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
c_is_stringy = is_string_like(c) or is_sequence_of_strings(c)
if not c_is_stringy:
c = np.asanyarray(c)
if c.size == x.size:
c = np.ma.ravel(c)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
if c_is_stringy:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if c.size == x.size:
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
faceted = kwargs.pop('faceted', None)
edgecolors = kwargs.get('edgecolors', None)
if faceted is not None:
cbook.warn_deprecated(
'1.2', name='faceted', alternative='edgecolor',
obj_type='option')
if faceted:
edgecolors = None
else:
edgecolors = 'none'
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = 'face'
offsets = np.dstack((x, y))
collection = mcoll.PathCollection(
(path,), scales,
facecolors=colors,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=kwargs.pop('transform', self.transData),
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0:
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0:
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def hexbin(self, x, y, C=None, gridsize=100, bins=None,
xscale='linear', yscale='linear', extent=None,
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function=np.mean, mincnt=None, marginals=False,
**kwargs):
"""
Make a hexagonal binning plot.
Call signature::
hexbin(x, y, C=None, gridsize=100, bins=None,
xscale='linear', yscale='linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function=np.mean, mincnt=None, marginals=False,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is *None*
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ *None* | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
*mincnt*: [ *None* | a positive integer ]
If not *None*, only display cells with more than *mincnt*
number of points in the cell
*marginals*: [ *True* | *False* ]
If marginals is *True*, plot the marginal density as
colormapped rectangles along the bottom of the x-axis and
left of the y-axis
*extent*: [ *None* | scalars (left, right, bottom, top) ]
The limits of the bins. The default assigns the limits
based on gridsize, x, y, xscale and yscale.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ *None* | Colormap ]
a :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ *None* | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin* / *vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar between 0 and 1, or *None*
the alpha value for the patches
*linewidths*: [ *None* | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ *None* | ``'none'`` | mpl color | color sequence ]
If ``'none'``, draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon. If *marginals* is *True*, horizontal
bar and vertical bar (both PolyCollections) will be attached
to the returned collection as attributes *hbar* and *vbar*.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
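A minimal counting sketch with a logarithmic color scale (variable
names are illustrative)::
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.random.standard_normal(10000)
    y = 2 * x + np.random.standard_normal(10000)
    fig, ax = plt.subplots()
    hb = ax.hexbin(x, y, gridsize=40, bins='log')
    cb = fig.colorbar(hb, ax=ax)
    cb.set_label('log10(N)')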
"""
if not self._hold:
self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx / math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale == 'log':
if np.any(x <= 0.0):
raise ValueError("x contains non-positive values, so can not"
" be log-scaled")
x = np.log10(x)
if yscale == 'log':
if np.any(y <= 0.0):
raise ValueError("y contains non-positive values, so can not"
" be log-scaled")
y = np.log10(y)
if extent is not None:
xmin, xmax, ymin, ymax = extent
else:
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# to avoid issues with singular data, expand the min/max pairs
xmin, xmax = mtrans.nonsingular(xmin, xmax, expander=0.1)
ymin, ymax = mtrans.nonsingular(ymin, ymax, expander=0.1)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax - xmin) / nx
sy = (ymax - ymin) / ny
if marginals:
xorig = x.copy()
yorig = y.copy()
x = (x - xmin) / sx
y = (y - ymin) / sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1 * ny1 + nx2 * ny2
d1 = (x - ix1) ** 2 + 3.0 * (y - iy1) ** 2
d2 = (x - ix2 - 0.5) ** 2 + 3.0 * (y - iy2 - 0.5) ** 2
bdist = (d1 < d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1 * ny1]
lattice2 = accum[nx1 * ny1:]
lattice1.shape = (nx1, ny1)
lattice2.shape = (nx2, ny2)
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]] += 1
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]] += 1
# threshold
if mincnt is not None:
for i in xrange(nx1):
for j in xrange(ny1):
if lattice1[i, j] < mincnt:
lattice1[i, j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
if lattice2[i, j] < mincnt:
lattice2[i, j] = np.nan
accum = np.hstack((lattice1.astype(float).ravel(),
lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
else:
if mincnt is None:
mincnt = 0
# create accumulation arrays
lattice1 = np.empty((nx1, ny1), dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i, j] = []
lattice2 = np.empty((nx2, ny2), dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i, j] = []
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]].append(C[i])
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]].append(C[i])
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i, j]
if len(vals) > mincnt:
lattice1[i, j] = reduce_C_function(vals)
else:
lattice1[i, j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i, j]
if len(vals) > mincnt:
lattice2[i, j] = reduce_C_function(vals)
else:
lattice2[i, j] = np.nan
accum = np.hstack((lattice1.astype(float).ravel(),
lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
offsets = np.zeros((n, 2), float)
offsets[:nx1 * ny1, 0] = np.repeat(np.arange(nx1), ny1)
offsets[:nx1 * ny1, 1] = np.tile(np.arange(ny1), nx1)
offsets[nx1 * ny1:, 0] = np.repeat(np.arange(nx2) + 0.5, ny2)
offsets[nx1 * ny1:, 1] = np.tile(np.arange(ny2), nx2) + 0.5
offsets[:, 0] *= sx
offsets[:, 1] *= sy
offsets[:, 0] += xmin
offsets[:, 1] += ymin
# remove accumulation bins with no data
offsets = offsets[good_idxs, :]
accum = accum[good_idxs]
polygon = np.zeros((6, 2), float)
polygon[:, 0] = sx * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
polygon[:, 1] = sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
if edgecolors == 'none':
edgecolors = 'face'
if xscale == 'log' or yscale == 'log':
polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
if xscale == 'log':
polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
xmin = 10.0 ** xmin
xmax = 10.0 ** xmax
self.set_xscale(xscale)
if yscale == 'log':
polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
ymin = 10.0 ** ymin
ymax = 10.0 ** ymax
self.set_yscale(yscale)
collection = mcoll.PolyCollection(
polygons,
edgecolors=edgecolors,
linewidths=linewidths,
)
else:
collection = mcoll.PolyCollection(
[polygon],
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=mtransforms.IdentityTransform(),
offset_position="data"
)
if isinstance(norm, mcolors.LogNorm):
if (accum == 0).any():
# make sure we have no zeros
accum += 1
# autoscale the norm with current accum values if it hasn't
# been set
if norm is not None:
if norm.vmin is None and norm.vmax is None:
norm.autoscale(accum)
# Transform accum if needed
if bins == 'log':
accum = np.log10(accum + 1)
elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins -= 1 # one less edge than bins
bins = minimum + (maximum - minimum) * np.arange(bins) / bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim(corners)
self.autoscale_view(tight=True)
# add the collection last
self.add_collection(collection, autolim=False)
if not marginals:
return collection
if C is None:
C = np.ones(len(x))
def coarse_bin(x, y, coarse):
ind = coarse.searchsorted(x).clip(0, len(coarse) - 1)
mus = np.zeros(len(coarse))
for i in range(len(coarse)):
mu = reduce_C_function(y[ind == i])
mus[i] = mu
return mus
coarse = np.linspace(xmin, xmax, gridsize)
xcoarse = coarse_bin(xorig, C, coarse)
valid = ~np.isnan(xcoarse)
verts, values = [], []
for i, val in enumerate(xcoarse):
thismin = coarse[i]
if i < len(coarse) - 1:
thismax = coarse[i + 1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]:
continue
verts.append([(thismin, 0),
(thismin, 0.05),
(thismax, 0.05),
(thismax, 0)])
values.append(val)
values = np.array(values)
trans = self.get_xaxis_transform(which='grid')
hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
hbar.set_array(values)
hbar.set_cmap(cmap)
hbar.set_norm(norm)
hbar.set_alpha(alpha)
hbar.update(kwargs)
self.add_collection(hbar, autolim=False)
coarse = np.linspace(ymin, ymax, gridsize)
ycoarse = coarse_bin(yorig, C, coarse)
valid = ~np.isnan(ycoarse)
verts, values = [], []
for i, val in enumerate(ycoarse):
thismin = coarse[i]
if i < len(coarse) - 1:
thismax = coarse[i + 1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]:
continue
verts.append([(0, thismin), (0.0, thismax),
(0.05, thismax), (0.05, thismin)])
values.append(val)
values = np.array(values)
trans = self.get_yaxis_transform(which='grid')
vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
vbar.set_array(values)
vbar.set_cmap(cmap)
vbar.set_norm(norm)
vbar.set_alpha(alpha)
vbar.update(kwargs)
self.add_collection(vbar, autolim=False)
collection.hbar = hbar
collection.vbar = vbar
def on_changed(collection):
hbar.set_cmap(collection.get_cmap())
hbar.set_clim(collection.get_clim())
vbar.set_cmap(collection.get_cmap())
vbar.set_clim(collection.get_clim())
collection.callbacksSM.connect('changed', on_changed)
return collection
@docstring.dedent_interpd
def arrow(self, x, y, dx, dy, **kwargs):
"""
Add an arrow to the axes.
Call signature::
arrow(x, y, dx, dy, **kwargs)
Draws an arrow on the axes from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*). Uses a FancyArrow patch to construct the arrow.
The resulting arrow is affected by the axes aspect ratio and limits.
This may produce an arrow whose head is not square with its stem. To
create an arrow whose head is square with its stem, use
:meth:`annotate` for example::
ax.annotate("", xy=(0.5, 0.5), xytext=(0, 0),
arrowprops=dict(arrowstyle="->"))
Optional kwargs control the arrow construction and properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
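A minimal sketch in data coordinates (names and values are
illustrative; the extra kwargs are standard FancyArrow properties)::
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.arrow(0.1, 0.1, 0.5, 0.3, head_width=0.03,
             length_includes_head=True, fc='k')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)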
"""
# Strip away units for the underlying patch since units
# do not make sense to most patch-like code
x = self.convert_xunits(x)
y = self.convert_yunits(y)
dx = self.convert_xunits(dx)
dy = self.convert_yunits(dy)
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold:
self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, autolim=True)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def stackplot(self, x, *args, **kwargs):
return mstack.stackplot(self, x, *args, **kwargs)
stackplot.__doc__ = mstack.stackplot.__doc__
def streamplot(self, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None, zorder=1):
if not self._hold:
self.cla()
stream_container = mstream.streamplot(self, x, y, u, v,
density=density,
linewidth=linewidth,
color=color,
cmap=cmap,
norm=norm,
arrowsize=arrowsize,
arrowstyle=arrowstyle,
minlength=minlength,
transform=transform,
zorder=zorder)
return stream_container
streamplot.__doc__ = mstream.streamplot.__doc__
@docstring.dedent_interpd
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
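A minimal sketch on a regular grid (names and values are
illustrative; U and V are wind components in knots)::
    import numpy as np
    import matplotlib.pyplot as plt
    X, Y = np.meshgrid(np.arange(5), np.arange(5))
    U, V = 10 * X, 10 * Y
    fig, ax = plt.subplots()
    ax.barbs(X, Y, U, V)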
"""
if not self._hold:
self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b, autolim=True)
self.autoscale_view()
return b
@docstring.dedent_interpd
def fill(self, *args, **kwargs):
"""
Plot filled polygons.
Call signature::
fill(*args, **kwargs)
*args* is a variable length argument, allowing for multiple
*x*, *y* pairs with an optional color format string; see
:func:`~matplotlib.pyplot.plot` for details on the argument
parsing. For example, to plot a polygon with vertices at *x*,
*y* in blue::
ax.fill(x, y, 'b')
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, e.g., shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/lines_bars_and_markers/fill_demo.py
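A minimal sketch shading one period of a sine wave (names are
illustrative)::
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.linspace(0, 2 * np.pi, 100)
    fig, ax = plt.subplots()
    ax.fill(x, np.sin(x), 'b', alpha=0.3)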
"""
if not self._hold:
self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch(poly)
patches.append(poly)
self.autoscale_view()
return patches
@docstring.dedent_interpd
def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
**kwargs):
"""
Make filled polygons between two curves.
Call signature::
fill_between(x, y1, y2=0, where=None, interpolate=False, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x* :
An N-length array of the x data
*y1* :
An N-length array (or scalar) of the y data
*y2* :
An N-length array (or scalar) of the y data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``.
*interpolate* :
If *True*, interpolate between the two lines to find the
precise point of intersection. Otherwise, the start and
end points of the filled region will only occur on explicit
values in the *x* array.
*kwargs* :
Keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`.
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between_demo.py
.. seealso::
:meth:`fill_betweenx`
for filling between two sets of x-values
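A minimal sketch using *where* and *interpolate* to shade only the
regions in which one curve exceeds the other (names are
illustrative)::
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.linspace(0, 2 * np.pi, 200)
    y1, y2 = np.sin(x), 0.5 * np.sin(2 * x)
    fig, ax = plt.subplots()
    ax.fill_between(x, y1, y2, where=y1 >= y2,
                    interpolate=True, alpha=0.4)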
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = ma.masked_invalid(self.convert_xunits(x))
y1 = ma.masked_invalid(self.convert_yunits(y1))
y2 = ma.masked_invalid(self.convert_yunits(y2))
if y1.ndim == 0:
y1 = np.ones_like(x) * y1
if y2.ndim == 0:
y2 = np.ones_like(x) * y2
if where is None:
where = np.ones(len(x), np.bool)
else:
where = np.asarray(where, np.bool)
if not (x.shape == y1.shape == y2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2 * N + 2, 2), np.float)
if interpolate:
def get_interp_point(ind):
im1 = max(ind - 1, 0)
x_values = x[im1:ind + 1]
diff_values = y1[im1:ind + 1] - y2[im1:ind + 1]
y1_values = y1[im1:ind + 1]
if len(diff_values) == 2:
if np.ma.is_masked(diff_values[1]):
return x[im1], y1[im1]
elif np.ma.is_masked(diff_values[0]):
return x[ind], y1[ind]
diff_order = diff_values.argsort()
diff_root_x = np.interp(
0, diff_values[diff_order], x_values[diff_order])
diff_root_y = np.interp(diff_root_x, x_values, y1_values)
return diff_root_x, diff_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
X[0] = start
X[N + 1] = end
X[1:N + 1, 0] = xslice
X[1:N + 1, 1] = y1slice
X[N + 2:, 0] = xslice[::-1]
X[N + 2:, 1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.ignore_existing_data_limits = False
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection, autolim=False)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def fill_betweenx(self, y, x1, x2=0, where=None, **kwargs):
"""
Make filled polygons between two horizontal curves.
Call signature::
fill_betweenx(y, x1, x2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *x1* and *x2* where
``where==True``
*y* :
An N-length array of the y data
*x1* :
An N-length array (or scalar) of the x data
*x2* :
An N-length array (or scalar) of the x data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs* :
keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_betweenx_demo.py
.. seealso::
:meth:`fill_between`
for filling between two sets of y-values
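A minimal sketch, the transposed analogue of :meth:`fill_between`
(names are illustrative; the scalar *x2* is broadcast as described
above)::
    import numpy as np
    import matplotlib.pyplot as plt
    y = np.linspace(0, 2 * np.pi, 200)
    x1 = np.sin(y)
    fig, ax = plt.subplots()
    ax.fill_betweenx(y, x1, 0.5, where=x1 >= 0.5)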
"""
# Handle united data, such as dates
self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
self._process_unit_info(xdata=x2)
# Convert the arrays so we can work with them
y = ma.masked_invalid(self.convert_yunits(y))
x1 = ma.masked_invalid(self.convert_xunits(x1))
x2 = ma.masked_invalid(self.convert_xunits(x2))
if x1.ndim == 0:
x1 = np.ones_like(y) * x1
if x2.ndim == 0:
x2 = np.ones_like(y) * x2
if where is None:
where = np.ones(len(y), np.bool)
else:
where = np.asarray(where, np.bool)
if not (y.shape == x1.shape == x2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (y, x1, x2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
yslice = y[ind0:ind1]
x1slice = x1[ind0:ind1]
x2slice = x2[ind0:ind1]
if not len(yslice):
continue
N = len(yslice)
Y = np.zeros((2 * N + 2, 2), np.float)
# the purpose of the next two lines is for when x2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the x1 sample points do
Y[0] = x2slice[0], yslice[0]
Y[N + 1] = x2slice[-1], yslice[-1]
Y[1:N + 1, 0] = x1slice
Y[1:N + 1, 1] = yslice
Y[N + 2:, 0] = x2slice[::-1]
Y[N + 2:, 1] = yslice[::-1]
polys.append(Y)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
X1Y = np.array([x1[where], y[where]]).T
X2Y = np.array([x2[where], y[where]]).T
self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.ignore_existing_data_limits = False
self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
updatex=True, updatey=False)
self.add_collection(collection, autolim=False)
self.autoscale_view()
return collection
#### plotting z(x,y): imshow, pcolor and relatives, contour
@docstring.dedent_interpd
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=None, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
Display an image on the axes.
Parameters
----------
X : array_like, shape (n, m) or (n, m, 3) or (n, m, 4)
Display the image in `X` to current axes. `X` may be a float
array, a uint8 array or a PIL image. If `X` is an array, it
can have the following shapes:
- MxN -- luminance (grayscale, float array only)
- MxNx3 -- RGB (float or uint8 array)
- MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays
should be in the range 0.0 to 1.0; MxN float arrays may be
normalised.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
If None, default to rc `image.cmap` value. `cmap` is ignored when
`X` has RGB(A) information
aspect : ['auto' | 'equal' | scalar], optional, default: None
If 'auto', changes the image aspect ratio to match that of the
axes.
If 'equal', and `extent` is None, changes the axes aspect ratio to
match that of the image. If `extent` is not `None`, the axes
aspect ratio is changed to match that of the extent.
If None, default to rc ``image.aspect`` value.
interpolation : string, optional, default: None
Acceptable values are 'none', 'nearest', 'bilinear', 'bicubic',
'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser',
'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc',
'lanczos'
If `interpolation` is None, default to rc `image.interpolation`.
See also the `filternorm` and `filterrad` parameters.
If `interpolation` is 'none', then no interpolation is performed
on the Agg, ps and pdf backends. Other backends will fall back to
'nearest'.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `~matplotlib.colors.Normalize` instance is used to scale
luminance data to 0, 1. If `None`, use the default
func:`normalize`. `norm` is only used if `X` is an array of
floats.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
origin : ['upper' | 'lower'], optional, default: None
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If None, default to rc `image.origin`.
extent : scalars (left, right, bottom, top), optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
shape : scalars (columns, rows), optional, default: None
For raw buffer images
filternorm : scalar, optional, default: 1
A parameter for the antigrain image resize filter. From the
antigrain documentation, if `filternorm` = 1, the filter
normalizes integer values and corrects the rounding errors. It
doesn't do anything with the source floating point values, it
corrects only integers according to the rule of 1.0 which means
that any sum of pixel weights must be equal to 1.0. So, the
filter function must produce a graph of the proper shape.
filterrad : scalar, optional, default: 4.0
The filter radius for filters that have a radius parameter, i.e.
when interpolation is one of: 'sinc', 'lanczos' or 'blackman'
Returns
-------
image : `~matplotlib.image.AxesImage`
Other parameters
----------------
kwargs : `~matplotlib.artist.Artist` properties.
See also
--------
matshow : Plot a matrix or an array as an image.
Examples
--------
.. plot:: mpl_examples/pylab_examples/image_demo.py
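A minimal luminance-image sketch (names are illustrative); *extent*
places the image in data coordinates::
    import numpy as np
    import matplotlib.pyplot as plt
    Z = np.random.rand(10, 10)
    fig, ax = plt.subplots()
    im = ax.imshow(Z, interpolation='nearest', origin='lower',
                   extent=(0, 1, 0, 1), vmin=0, vmax=1)
    fig.colorbar(im, ax=ax)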
"""
if not self._hold:
self.cla()
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
if aspect is None:
aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
self.add_image(im)
return im
@staticmethod
def _pcolorargs(funcname, *args, **kw):
# This takes one kwarg, allmatch.
# If allmatch is True, then the incoming X, Y, C must
# have matching dimensions, taking into account that
# X and Y can be 1-D rather than 2-D. This perfect
# match is required for Gouraud shading. For flat
# shading, X and Y specify boundaries, so we need
# one more boundary than color in each direction.
# For convenience, and consistent with Matlab, we
# discard the last row and/or column of C if necessary
# to meet this condition. This is done if allmatch
# is False.
allmatch = kw.pop("allmatch", False)
if len(args) == 1:
C = args[0]
numRows, numCols = C.shape
if allmatch:
X, Y = np.meshgrid(np.arange(numCols), np.arange(numRows))
else:
X, Y = np.meshgrid(np.arange(numCols + 1),
np.arange(numRows + 1))
return X, Y, C
if len(args) == 3:
X, Y, C = args
numRows, numCols = C.shape
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1, Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
if allmatch:
if not (Nx == numCols and Ny == numRows):
raise TypeError('Dimensions of C %s are incompatible with'
' X (%d) and/or Y (%d); see help(%s)' % (
C.shape, Nx, Ny, funcname))
else:
if not (numCols in (Nx, Nx - 1) and numRows in (Ny, Ny - 1)):
raise TypeError('Dimensions of C %s are incompatible with'
' X (%d) and/or Y (%d); see help(%s)' % (
C.shape, Nx, Ny, funcname))
C = C[:Ny - 1, :Nx - 1]
return X, Y, C
@docstring.dedent_interpd
def pcolor(self, *args, **kwargs):
"""
Create a pseudocolor plot of a 2-D array.
.. note::
pcolor can be very slow for large arrays; consider
using the similar but much faster
:func:`~matplotlib.pyplot.pcolormesh` instead.
Call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*norm*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either is *None*, it
is autoscaled to the respective min or max
of the color array *C*. If not *None*, *vmin* or
*vmax* passed in here override any pre-existing values
supplied in the *norm* instance.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
MATLAB.
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='none'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ *None* | ``'none'`` | color | color sequence]
If *None*, the rc setting is used by default.
If ``'none'``, edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
*snap*: bool
Whether to snap the mesh to pixel boundaries.
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the MATLAB convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = np.meshgrid(x, y)
is equivalent to::
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand(len(x), len(y))
then you need to transpose C::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
MATLAB :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
.. note::
The default *antialiaseds* is False if the default
*edgecolors*="none" is used. This eliminates artificial lines
at patch boundaries, and works regardless of the value of
alpha. If *edgecolors* is not "none", then the default
*antialiaseds* is taken from
rcParams['patch.antialiased'], which defaults to *True*.
Stroking the edges may be preferred if *alpha* is 1, but
will cause artifacts otherwise.
.. seealso::
:func:`~matplotlib.pyplot.pcolormesh`
For an explanation of the differences between
pcolor and pcolormesh.
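A minimal sketch with explicit cell boundaries, one more in each
direction than *C* (names are illustrative)::
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.arange(6)  # 6 boundaries -> 5 columns
    y = np.arange(4)  # 4 boundaries -> 3 rows
    X, Y = np.meshgrid(x, y)
    C = np.random.rand(3, 5)
    fig, ax = plt.subplots()
    pc = ax.pcolor(X, Y, C, edgecolors='k')
    fig.colorbar(pc, ax=ax)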
"""
if not self._hold:
self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if 'shading' in kwargs:
cbook.warn_deprecated(
'1.2', name='shading', alternative='edgecolors',
obj_type='option')
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args, allmatch=False)
Ny, Nx = X.shape
# unit conversion allows e.g. datetime objects as axis values
self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
X = self.convert_xunits(X)
Y = self.convert_yunits(Y)
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X) + ma.getmaskarray(Y)
xymask = (mask[0:-1, 0:-1] + mask[1:, 1:] +
mask[0:-1, 1:] + mask[1:, 0:-1])
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C) + xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask == 0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1, 0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1, 0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:, 0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:, 0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:, 1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:, 1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1, 1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1, 1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:, newaxis], Y1[:, newaxis],
X2[:, newaxis], Y2[:, newaxis],
X3[:, newaxis], Y3[:, newaxis],
X4[:, newaxis], Y4[:, newaxis],
X1[:, newaxis], Y1[:, newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
C = compress(ravelmask, ma.filled(C[0:Ny - 1, 0:Nx - 1]).ravel())
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
if shading == 'faceted':
edgecolors = 'k',
else:
edgecolors = 'none'
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', edgecolors)
# aa setting will default via collections to patch.antialiased
# unless the boundary is not stroked, in which case the
# default will be False; with unstroked boundaries, aa
# makes artifacts that are often disturbing.
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and (is_string_like(ec) and
ec.lower() == "none"):
kwargs['antialiaseds'] = False
kwargs.setdefault('snap', False)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
pts = np.vstack([x, y]).T.astype(np.float)
transformed_pts = trans_to_data.transform(pts)
x = transformed_pts[..., 0]
y = transformed_pts[..., 1]
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
self.add_collection(collection, autolim=False)
return collection
@docstring.dedent_interpd
def pcolormesh(self, *args, **kwargs):
"""
Plot a quadrilateral mesh.
Call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
Create a pseudocolor plot of a 2-D array.
pcolormesh is similar to :func:`~matplotlib.pyplot.pcolor`,
but uses a different mechanism and returns a different
object; pcolor returns a
:class:`~matplotlib.collections.PolyCollection` but pcolormesh
returns a
:class:`~matplotlib.collections.QuadMesh`. It is much faster,
so it is almost always preferred for large arrays.
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either is *None*, it
is autoscaled to the respective min or max
of the color array *C*. If not *None*, *vmin* or
*vmax* passed in here override any pre-existing values
supplied in the *norm* instance.
*shading*: [ 'flat' | 'gouraud' ]
'flat' indicates a solid color for each quad. With
'gouraud', each quad is Gouraud shaded; in that case
*edgecolors* is ignored.
*edgecolors*: [*None* | ``'None'`` | ``'face'`` | color |
color sequence]
If *None*, the rc setting is used by default.
If ``'None'``, edges will not be visible.
If ``'face'``, edges will have the same color as the faces.
An mpl color or sequence of colors will set the edge color.
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is a :class:`matplotlib.collections.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh` properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold:
self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat').lower()
antialiased = kwargs.pop('antialiased', False)
kwargs.setdefault('edgecolors', 'None')
allmatch = (shading == 'gouraud')
X, Y, C = self._pcolorargs('pcolormesh', *args, allmatch=allmatch)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = C.ravel()
X = X.ravel()
Y = Y.ravel()
# unit conversion allows e.g. datetime objects as axis values
self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
X = self.convert_xunits(X)
Y = self.convert_yunits(Y)
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords,
antialiased=antialiased, shading=shading, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
pts = np.vstack([X, Y]).T.astype(np.float)
transformed_pts = trans_to_data.transform(pts)
X = transformed_pts[..., 0]
Y = transformed_pts[..., 1]
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
self.add_collection(collection, autolim=False)
return collection
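# Usage sketch for ``pcolormesh`` (comment only; reuses X, Y, C from the
# ``pcolor`` sketch above). The call is drop-in compatible but returns a
# QuadMesh, which renders much faster for large arrays:
#
#     fig, ax = plt.subplots()
#     mesh = ax.pcolormesh(X, Y, C, shading='flat')
#     mesh.set_clim(0, 1)               # same effect as vmin=0, vmax=1
#     fig.colorbar(mesh, ax=ax)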
@docstring.dedent_interpd
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a pcolor-type method that
provides the fastest possible rendering with the Agg
backend, and that can handle any quadrilateral grid.
It supports only flat shading (no outlines), it lacks
support for log scaling of the axes, and it does not
have a pyplot wrapper.
Call signatures::
ax.pcolorfast(C, **kwargs)
ax.pcolorfast(xr, yr, C, **kwargs)
ax.pcolorfast(x, y, C, **kwargs)
ax.pcolorfast(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``ax.pcolorfast(C, **kwargs)`` is equivalent to
``ax.pcolorfast([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance from cm. If *None*,
use rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to scale
luminance data to 0,1. If *None*, defaults to normalize()
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max
of the color array *C* is used. If you pass a norm instance,
*vmin* and *vmax* will be *None*.
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a :class:`~matplotlib.collections.QuadMesh`
collection in the general quadrilateral case.
"""
if not self._hold:
self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01 * np.abs(dx.mean()) and
np.ptp(dy) < 0.01 * np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc + 1
Ny = nr + 1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0, edgecolors="None")
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection, autolim=False)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.add_image(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.add_image(im)
ret = im
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
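# Usage sketch for ``pcolorfast`` (comment only); the style dispatch above
# picks the artist. The nonuniform-edge data below is hypothetical, chosen
# so the uniformity check fails and a PcolorImage is produced:
#
#     C = np.random.rand(100, 200)
#     fig, ax = plt.subplots()
#     im = ax.pcolorfast(C)                        # "image" (AxesImage)
#     im = ax.pcolorfast([0, 10], [0, 5], C)       # still "image": ranges
#     x = np.logspace(0, 1, 201)                   # nonuniform 1D edges
#     y = np.linspace(0, 5, 101)
#     im = ax.pcolorfast(x, y, C)                  # "pcolorimage"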
def contour(self, *args, **kwargs):
if not self._hold:
self.cla()
kwargs['filled'] = False
return mcontour.QuadContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.QuadContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold:
self.cla()
kwargs['filled'] = True
return mcontour.QuadContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.QuadContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
@docstring.dedent_interpd
def table(self, **kwargs):
"""
Add a table to the current axes.
Call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Returns a :class:`matplotlib.table.Table` instance. For finer
grained control over tables, use the
:class:`~matplotlib.table.Table` class and add it to the axes
with :meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
#### Data analysis
@docstring.dedent_interpd
def hist(self, x, bins=10, range=None, normed=False, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None, stacked=False,
**kwargs):
"""
Plot a histogram.
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Multiple data can be provided via *x* as a list of datasets
of potentially different length ([*x0*, *x1*, ...]), or as
a 2-D ndarray in which each column is a dataset. Note that
the ndarray form is transposed relative to the list form.
Masked arrays are not supported at present.
Parameters
----------
x : (n,) array or sequence of (n,) arrays
Input values; this takes either a single array or a sequence of
arrays, which are not required to be of the same length.
bins : integer or array_like, optional
If an integer is given, `bins + 1` bin edges are returned,
consistently with :func:`numpy.histogram` for numpy version >=
1.3.
Unequally spaced bins are supported if `bins` is a sequence.
Default is 10
range : tuple or None, optional
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, `range` is (x.min(), x.max()). Range
has no effect if `bins` is a sequence.
If `bins` is a sequence or `range` is specified, autoscaling
is based on the specified bin range instead of the
range of x.
Default is ``None``
normed : boolean, optional
If `True`, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``, i.e., the integral of the histogram will sum
to 1. If *stacked* is also *True*, the sum of the histograms is
normalized to 1.
Default is ``False``
weights : (n, ) array_like or None, optional
An array of weights, of the same shape as `x`. Each value in `x`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1.
Default is ``None``
cumulative : boolean, optional
If `True`, then a histogram is computed where each bin gives the
counts in that bin plus all bins for smaller values. The last bin
gives the total number of datapoints. If `normed` is also `True`
then the histogram is normalized such that the last bin equals 1.
If `cumulative` evaluates to less than 0 (e.g., -1), the direction
of accumulation is reversed. In this case, if `normed` is also
`True`, then the histogram is normalized such that the first bin
equals 1.
Default is ``False``
bottom : array_like, scalar, or None
Location of the bottom baseline of each bin. If a scalar,
the base line for each bin is shifted by the same amount.
If an array, each bin is shifted independently and the length
of bottom must match the number of bins. If None, defaults to 0.
Default is ``None``
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, optional
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given, the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
Default is 'bar'
align : {'left', 'mid', 'right'}, optional
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
Default is 'mid'
orientation : {'horizontal', 'vertical'}, optional
If 'horizontal', `~matplotlib.pyplot.barh` will be used for
bar-type histograms and the *bottom* kwarg will be the left edges.
rwidth : scalar or None, optional
The relative width of the bars as a fraction of the bin width. If
`None`, automatically compute the width.
Ignored if `histtype` is 'step' or 'stepfilled'.
Default is ``None``
log : boolean, optional
If `True`, the histogram axis will be set to a log scale. If `log`
is `True` and `x` is a 1D array, empty bins will be filtered out
and only the non-empty (`n`, `bins`, `patches`) will be returned.
Default is ``False``
color : color or array_like of colors or None, optional
Color spec or sequence of color specs, one per dataset. Default
(`None`) uses the standard line color sequence.
Default is ``None``
label : string or None, optional
String, or sequence of strings to match multiple datasets. Bar
charts yield multiple patches per dataset, but only the first gets
the label, so that the legend command will work as expected.
Default is ``None``
stacked : boolean, optional
If `True`, multiple data are stacked on top of each other. If
`False`, multiple data are arranged side by side if histtype is
'bar', or on top of each other if histtype is 'step'.
Default is ``False``
Returns
-------
n : array or list of arrays
The values of the histogram bins. See **normed** and **weights**
for a description of the possible semantics. If input **x** is an
array, then this is an array of length **nbins**. If input is a
sequence of arrays ``[data1, data2, ...]``, then this is a list of
arrays with the values of the histograms for each of the arrays
in the same order.
bins : array
The edges of the bins. Length nbins + 1 (nbins left edges and right
edge of last bin). Always a single array even when multiple data
sets are passed in.
patches : list or list of lists
Silent list of individual patches used to create the histogram
or list of such list if multiple input datasets.
Other Parameters
----------------
kwargs : `~matplotlib.patches.Patch` properties
See also
--------
hist2d : 2D histograms
Notes
-----
Until numpy release 1.5, the underlying numpy histogram function was
incorrect with `normed`=`True` if bin sizes were unequal. MPL
inherited that error. It is now corrected within MPL when using
earlier numpy versions.
Examples
--------
.. plot:: mpl_examples/statistics/histogram_demo_features.py
"""
if not self._hold:
self.cla()
# xrange becomes range after 2to3
bin_range = range
range = __builtins__["range"]
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in numpy !!!
# Validate string inputs here so we don't have to clutter
# subsequent code.
if histtype not in ['bar', 'barstacked', 'step', 'stepfilled']:
raise ValueError("histtype %s is not recognized" % histtype)
if align not in ['left', 'mid', 'right']:
raise ValueError("align kwarg %s is not recognized" % align)
if orientation not in ['horizontal', 'vertical']:
raise ValueError(
"orientation kwarg %s is not recognized" % orientation)
if histtype == 'barstacked' and not stacked:
stacked = True
# Check whether bins or range are given explicitly.
binsgiven = (cbook.iterable(bins) or bin_range is not None)
# basic input validation
flat = np.ravel(x)
if len(flat) == 0:
raise ValueError("x must have at least one data point")
elif len(flat) == 1 and not binsgiven:
raise ValueError(
"x has only one data point. bins or range kwarg must be given")
# Massage 'x' for processing.
# NOTE: Be sure any changes here is also done below to 'weights'
if isinstance(x, np.ndarray) or not iterable(x[0]):
# TODO: support masked arrays;
x = np.asarray(x)
if x.ndim == 2:
x = x.T # 2-D input with columns as datasets; switch to rows
elif x.ndim == 1:
x = x.reshape(1, x.shape[0]) # new view, single row
else:
raise ValueError("x must be 1D or 2D")
if x.shape[1] < x.shape[0]:
warnings.warn(
'2D hist input should be nsamples x nvariables;\n '
'this looks transposed (shape is %d x %d)' % x.shape[::-1])
else:
# multiple hist with data of different length
x = [np.asarray(xi) for xi in x]
nx = len(x) # number of datasets
if color is None:
color = [six.next(self._get_lines.color_cycle)
for i in xrange(nx)]
else:
color = mcolors.colorConverter.to_rgba_array(color)
if len(color) != nx:
raise ValueError("color kwarg must have one color per dataset")
# We need to do to 'weights' what was done to 'x'
if weights is not None:
if isinstance(weights, np.ndarray) or not iterable(weights[0]):
w = np.array(weights)
if w.ndim == 2:
w = w.T
elif w.ndim == 1:
w.shape = (1, w.shape[0])
else:
raise ValueError("weights must be 1D or 2D")
else:
w = [np.asarray(wi) for wi in weights]
if len(w) != nx:
raise ValueError('weights should have the same shape as x')
for i in xrange(nx):
if len(w[i]) != len(x[i]):
raise ValueError(
'weights should have the same shape as x')
else:
w = [None]*nx
# Save the datalimits for the same reason:
_saved_bounds = self.dataLim.bounds
# If bins are not specified either explicitly or via range,
# we need to figure out the range required for all datasets,
# and supply that to np.histogram.
if not binsgiven:
xmin = np.inf
xmax = -np.inf
for xi in x:
if len(xi) > 0:
xmin = min(xmin, xi.min())
xmax = max(xmax, xi.max())
bin_range = (xmin, xmax)
#hist_kwargs = dict(range=range, normed=bool(normed))
# We will handle the normed kwarg within mpl until we
# get to the point of requiring numpy >= 1.5.
hist_kwargs = dict(range=bin_range)
n = []
mlast = None
for i in xrange(nx):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
m = m.astype(float) # causes problems later if it's an int
if mlast is None:
mlast = np.zeros(len(bins)-1, m.dtype)
if normed and not stacked:
db = np.diff(bins)
m = (m.astype(float) / db) / m.sum()
if stacked:
if mlast is None:
mlast = np.zeros(len(bins)-1, m.dtype)
m += mlast
mlast[:] = m
n.append(m)
if stacked and normed:
db = np.diff(bins)
for m in n:
m[:] = (m.astype(float) / db) / n[-1].sum()
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None, None, -1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
# Save autoscale state for later restoration; turn autoscaling
# off so we can do it all a single time at the end, instead
# of having it done by bar or fill and then having to be redone.
_saved_autoscalex = self.get_autoscalex_on()
_saved_autoscaley = self.get_autoscaley_on()
self.set_autoscalex_on(False)
self.set_autoscaley_on(False)
totwidth = np.diff(bins)
if rwidth is not None:
dr = min(1.0, max(0.0, rwidth))
elif len(n) > 1:
dr = 0.8
else:
dr = 1.0
if histtype == 'bar' and not stacked:
width = dr*totwidth/nx
dw = width
if nx > 1:
boffset = -0.5*dr*totwidth*(1.0-1.0/nx)
else:
boffset = 0.0
stacked = False
elif histtype == 'barstacked' or stacked:
width = dr*totwidth
boffset, dw = 0.0, 0.0
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
if orientation == 'horizontal':
_barfunc = self.barh
bottom_kwarg = 'left'
else: # orientation == 'vertical'
_barfunc = self.bar
bottom_kwarg = 'bottom'
for m, c in zip(n, color):
if bottom is None:
bottom = np.zeros(len(m), np.float)
if stacked:
height = m - bottom
else:
height = m
patch = _barfunc(bins[:-1]+boffset, height, width,
align='center', log=log,
color=c, **{bottom_kwarg: bottom})
patches.append(patch)
if stacked:
bottom[:] = m
boffset += dw
self.set_autoscalex_on(_saved_autoscalex)
self.set_autoscaley_on(_saved_autoscaley)
self.autoscale_view()
elif histtype.startswith('step'):
# these define the perimeter of the polygon
x = np.zeros(4 * len(bins) - 3, np.float)
y = np.zeros(4 * len(bins) - 3, np.float)
x[0:2*len(bins)-1:2], x[1:2*len(bins)-1:2] = bins, bins[:-1]
x[2*len(bins)-1:] = x[1:2*len(bins)-1][::-1]
if bottom is None:
bottom = np.zeros(len(bins)-1, np.float)
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = bottom, bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
if log:
if orientation == 'horizontal':
self.set_xscale('log', nonposx='clip')
logbase = self.xaxis._scale.base
else: # orientation == 'vertical'
self.set_yscale('log', nonposy='clip')
logbase = self.yaxis._scale.base
# Setting a minimum of 0 results in problems for log plots
if normed or weights is not None:
# For normed data, set to log base * minimum data value
# (gives 1 full tick-label unit for the lowest filled bin)
ndata = np.array(n)
minimum = (np.min(ndata[ndata > 0])) / logbase
else:
# For non-normed data, set the min to log base,
# again so that there is 1 full tick-label unit
# for the lowest bin
minimum = 1.0 / logbase
y[0], y[-1] = minimum, minimum
else:
minimum = np.min(bins)
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
# If fill kwarg is set, it will be passed to the patch collection,
# overriding this
fill = (histtype == 'stepfilled')
xvals, yvals = [], []
for m in n:
if stacked:
# starting point for drawing polygon
y[0] = y[1]
# top of the previous polygon becomes the bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
# set the top of this polygon
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = (m + bottom,
m + bottom)
if log:
y[y < minimum] = minimum
if orientation == 'horizontal':
xvals.append(y.copy())
yvals.append(x.copy())
else:
xvals.append(x.copy())
yvals.append(y.copy())
if fill:
# add patches in reverse order so that when stacking,
items lower in the stack are plotted on top of
# items higher in the stack
for x, y, c in reversed(list(zip(xvals, yvals, color))):
patches.append(self.fill(
x, y,
closed=True,
facecolor=c))
else:
for x, y, c in reversed(list(zip(xvals, yvals, color))):
split = 2 * len(bins)
patches.append(self.fill(
x[:split], y[:split],
closed=False, edgecolor=c,
fill=False))
# we return patches, so put it back in the expected order
patches.reverse()
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin0 = max(_saved_bounds[0]*0.9, minimum)
xmax = self.dataLim.intervalx[1]
for m in n:
if np.sum(m) > 0: # make sure there are counts
xmin = np.amin(m[m != 0])
# filter out the 0 height bins
xmin = max(xmin*0.9, minimum)
xmin = min(xmin0, xmin)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin0 = max(_saved_bounds[1]*0.9, minimum)
ymax = self.dataLim.intervaly[1]
for m in n:
if np.sum(m) > 0: # make sure there are counts
ymin = np.amin(m[m != 0])
# filter out the 0 height bins
ymin = max(ymin*0.9, minimum)
ymin = min(ymin0, ymin)
self.dataLim.intervaly = (ymin, ymax)
if label is None:
labels = [None]
elif is_string_like(label):
labels = [label]
else:
labels = [str(lab) for lab in label]
for (patch, lbl) in zip_longest(patches, labels, fillvalue=None):
if patch:
p = patch[0]
p.update(kwargs)
if lbl is not None:
p.set_label(lbl)
p.set_snap(False)
for p in patch[1:]:
p.update(kwargs)
p.set_label('_nolegend_')
if binsgiven:
if orientation == 'vertical':
self.update_datalim(
[(bins[0], 0), (bins[-1], 0)], updatey=False)
else:
self.update_datalim(
[(0, bins[0]), (0, bins[-1])], updatex=False)
if nx == 1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
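# Usage sketch for ``hist`` (comment only; two hypothetical datasets of
# different lengths, stacked and normalized as the docstring allows):
#
#     data = [np.random.randn(1000), np.random.randn(800) + 2]
#     fig, ax = plt.subplots()
#     n, bins, patches = ax.hist(data, bins=30, histtype='barstacked',
#                                normed=True, label=['a', 'b'])
#     ax.legend()
#     # n is a list of two arrays (one per dataset); bins is shared.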
@docstring.dedent_interpd
def hist2d(self, x, y, bins=10, range=None, normed=False, weights=None,
cmin=None, cmax=None, **kwargs):
"""
Make a 2D histogram plot.
Parameters
----------
x, y: array_like, shape (n, )
Input values
bins: [None | int | [int, int] | array_like | [array, array]]
The bin specification:
- If int, the number of bins for the two dimensions
(nx=ny=bins).
- If [int, int], the number of bins in each dimension
(nx, ny = bins).
- If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
- If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
The default value is 10.
range : array_like shape(2, 2), optional, default: None
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the bins parameters): [[xmin,
xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
normed : boolean, optional, default: False
Normalize histogram.
weights : array_like, shape (n, ), optional, default: None
An array of values w_i weighing each sample (x_i, y_i).
cmin : scalar, optional, default: None
All bins that have a count less than *cmin* will not be displayed,
and the corresponding count values in the returned count histogram
will be set to nan
cmax : scalar, optional, default: None
All bins that have a count more than *cmax* will not be displayed
(set to None before passing to imshow), and the corresponding count
values in the returned count histogram will be set to nan
Returns
-------
The return value is ``(counts, xedges, yedges, Image)``.
Other parameters
-----------------
kwargs : :meth:`pcolorfast` properties.
See also
--------
hist : 1D histogram
Notes
-----
Rendering the histogram with a logarithmic color scale is
accomplished by passing a :class:`colors.LogNorm` instance to
the *norm* keyword argument. Likewise, power-law normalization
(similar in effect to gamma correction) can be accomplished with
:class:`colors.PowerNorm`.
Examples
--------
.. plot:: mpl_examples/pylab_examples/hist2d_demo.py
"""
# xrange becomes range after 2to3
bin_range = range
range = __builtins__["range"]
h, xedges, yedges = np.histogram2d(x, y, bins=bins, range=bin_range,
normed=normed, weights=weights)
if cmin is not None:
h[h < cmin] = None
if cmax is not None:
h[h > cmax] = None
pc = self.pcolorfast(xedges, yedges, h.T, **kwargs)
self.set_xlim(xedges[0], xedges[-1])
self.set_ylim(yedges[0], yedges[-1])
return h, xedges, yedges, pc
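# Usage sketch for ``hist2d`` (comment only). A LogNorm instance gives
# the logarithmic color scale mentioned in the Notes section:
#
#     from matplotlib.colors import LogNorm
#     x = np.random.randn(10000)
#     y = x + 0.5 * np.random.randn(10000)
#     fig, ax = plt.subplots()
#     counts, xedges, yedges, img = ax.hist2d(x, y, bins=40,
#                                             norm=LogNorm())
#     fig.colorbar(img, ax=ax)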
@docstring.dedent_interpd
def psd(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, return_line=None, **kwargs):
"""
Plot the power spectral density.
Call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, return_line=None, **kwargs)
The power spectral density :math:`P_{xx}` is computed by Welch's
average periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*return_line*: bool
Whether to include the line object plotted in the returned values.
Default is False.
If *return_line* is False, returns the tuple (*Pxx*, *freqs*).
If *return_line* is True, returns the tuple (*Pxx*, *freqs*, *line*):
*Pxx*: 1-D array
The values for the power spectrum `P_{xx}` before scaling
(real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxx*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
Only returned if *return_line* is True.
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
.. seealso::
:func:`specgram`
:func:`specgram` differs in the default overlap; in not
returning the mean of the segment periodograms; in returning
the times of the segments; and in plotting a colormap instead
of a line.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitude spectrum.
:func:`csd`
:func:`csd` plots the spectral density between two signals.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
pxx, freqs = mlab.psd(x=x, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
line = self.plot(freqs, 10 * np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax - vmin
logi = int(np.log10(intv))
if logi == 0:
logi = .1
step = 10 * logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
self.set_yticks(ticks)
if return_line is None or not return_line:
return pxx, freqs
else:
return pxx, freqs, line
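# Usage sketch for ``psd`` (comment only; a hypothetical noisy 100 Hz
# sinusoid sampled at 1 kHz, so the plot should peak near 100 Hz):
#
#     Fs = 1000.0
#     t = np.arange(0, 1, 1 / Fs)
#     sig = np.sin(2 * np.pi * 100 * t) + 0.5 * np.random.randn(t.size)
#     fig, ax = plt.subplots()
#     Pxx, freqs = ax.psd(sig, NFFT=256, Fs=Fs, noverlap=128)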
@docstring.dedent_interpd
def csd(self, x, y, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, return_line=None, **kwargs):
"""
Plot the cross-spectral density.
Call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, return_line=None, **kwargs)
The cross spectral density :math:`P_{xy}` is computed by Welch's
average periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*: 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*return_line*: bool
Whether to include the line object plotted in the returned values.
Default is False.
If *return_line* is False, returns the tuple (*Pxy*, *freqs*).
If *return_line* is True, returns the tuple (*Pxy*, *freqs*, *line*):
*Pxy*: 1-D array
The values for the cross spectrum `P_{xy}` before scaling
(complex valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxy*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
Only returned if *return_line* is True.
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xy})` for decibels, though `P_{xy}` itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso::
:func:`psd`
:func:`psd` is the equivalent to setting y=x.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
pxy, freqs = mlab.csd(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
line = self.plot(freqs, 10 * np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax - vmin
step = 10 * int(np.log10(intv))
ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
self.set_yticks(ticks)
if return_line is None or not return_line:
return pxy, freqs
else:
return pxy, freqs, line
@docstring.dedent_interpd
def magnitude_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, scale=None,
**kwargs):
"""
Plot the magnitude spectrum.
Call signature::
magnitude_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
pad_to=None, sides='default', **kwargs)
Compute the magnitude spectrum of *x*. Data is padded to a
length of *pad_to* and the windowing function *window* is applied to
the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
*scale*: [ 'default' | 'linear' | 'dB' ]
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale. When *mode* is 'density',
this is dB power (10 * log10). Otherwise this is dB amplitude
(20 * log10). 'default' is 'linear'.
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*spectrum*, *freqs*, *line*):
*spectrum*: 1-D array
The values for the magnitude spectrum before scaling (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/spectrum_demo.py
.. seealso::
:func:`psd`
:func:`psd` plots the power spectral density.
:func:`angle_spectrum`
:func:`angle_spectrum` plots the angles of the corresponding
frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` plots the phase (unwrapped angle) of the
corresponding frequencies.
:func:`specgram`
:func:`specgram` can plot the magnitude spectrum of segments
within the signal in a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
if scale is None or scale == 'default':
scale = 'linear'
spec, freqs = mlab.magnitude_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
if scale == 'linear':
Z = spec
yunits = 'energy'
elif scale == 'dB':
Z = 20. * np.log10(spec)
yunits = 'dB'
else:
raise ValueError('Unknown scale %s' % scale)
lines = self.plot(freqs, Z, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Magnitude (%s)' % yunits)
return spec, freqs, lines[0]
@docstring.dedent_interpd
def angle_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the angle spectrum.
Call signature::
angle_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
pad_to=None, sides='default', **kwargs)
Compute the angle spectrum (wrapped phase spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*spectrum*, *freqs*, *line*):
*spectrum*: 1-D array
The values for the angle spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/spectrum_demo.py
.. seealso::
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitudes of the
corresponding frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` plots the unwrapped version of this
function.
:func:`specgram`
:func:`specgram` can plot the angle spectrum of segments
within the signal in a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
spec, freqs = mlab.angle_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Angle (radians)')
return spec, freqs, lines[0]
@docstring.dedent_interpd
def phase_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the phase spectrum.
Call signature::
phase_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
pad_to=None, sides='default', **kwargs)
Compute the phase spectrum (unwrapped angle spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*spectrum*, *freqs*, *line*):
*spectrum*: 1-D array
The values for the phase spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/spectrum_demo.py
.. seealso::
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitudes of the
corresponding frequencies.
:func:`angle_spectrum`
:func:`angle_spectrum` plots the wrapped version of this
function.
:func:`specgram`
:func:`specgram` can plot the phase spectrum of segments
within the signal in a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
spec, freqs = mlab.phase_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Phase (radians)')
return spec, freqs, lines[0]
@docstring.dedent_interpd
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Plot the coherence between *x* and *y*.
Call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the
normalized cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold:
self.cla()
cxy, freqs = mlab.cohere(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap,
scale_by_freq=scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
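# Usage sketch for ``cohere`` (comment only; reuses Fs and t from the
# ``psd`` sketch above). Two signals sharing a 100 Hz component should
# show coherence close to 1 near that frequency:
#
#     s1 = np.sin(2 * np.pi * 100 * t) + np.random.randn(t.size)
#     s2 = np.sin(2 * np.pi * 100 * t) + np.random.randn(t.size)
#     fig, ax = plt.subplots()
#     Cxy, f = ax.cohere(s1, s2, NFFT=256, Fs=Fs)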
@docstring.dedent_interpd
def specgram(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None,
cmap=None, xextent=None, pad_to=None, sides=None,
scale_by_freq=None, mode=None, scale=None,
vmin=None, vmax=None, **kwargs):
"""
Plot a spectrogram.
Call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, mode='default', scale='default',
**kwargs)
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*. The spectrogram is plotted as a colormap
(using imshow).
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*mode*: [ 'default' | 'psd' | 'magnitude' | 'angle' | 'phase' ]
What sort of spectrum to use. Default is 'psd', which takes
the power spectral density. 'complex' returns the complex-valued
frequency spectrum. 'magnitude' returns the magnitude spectrum.
'angle' returns the phase spectrum without unwrapping. 'phase'
returns the phase spectrum with unwrapping.
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 128.
*scale*: [ 'default' | 'linear' | 'dB' ]
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale. When *mode* is 'psd',
this is dB power (10 * log10). Otherwise this is dB amplitude
(20 * log10). 'default' is 'dB' if *mode* is 'psd' or
'magnitude' and 'linear' otherwise. This must be 'linear'
if *mode* is 'angle' or 'phase'.
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.colors.Colormap` instance; if *None*, use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`~matplotlib.mlab.specgram`
*kwargs*:
Additional kwargs are passed on to imshow which makes the
specgram image
.. note::
*detrend* and *scale_by_freq* only apply when *mode* is set to
'psd'
Returns the tuple (*spectrum*, *freqs*, *t*, *im*):
*spectrum*: 2-D array
columns are the periodograms of successive segments
*freqs*: 1-D array
The frequencies corresponding to the rows in *spectrum*
*t*: 1-D array
The times corresponding to midpoints of segments (i.e., the columns
in *spectrum*)
*im*: instance of class :class:`~matplotlib.image.AxesImage`
The image created by imshow containing the spectrogram
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; in not returning
times; and in generating a line plot instead of colormap.
:func:`magnitude_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'magnitude'. Plots a line instead of a colormap.
:func:`angle_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'angle'. Plots a line instead of a colormap.
:func:`phase_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'phase'. Plots a line instead of a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
if mode == 'complex':
raise ValueError('Cannot plot a complex specgram')
if scale is None or scale == 'default':
if mode in ['angle', 'phase']:
scale = 'linear'
else:
scale = 'dB'
elif mode in ['angle', 'phase'] and scale == 'dB':
raise ValueError('Cannot use dB scale with angle or phase mode')
spec, freqs, t = mlab.specgram(x=x, NFFT=NFFT, Fs=Fs,
detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if scale == 'linear':
Z = spec
elif scale == 'dB':
if mode is None or mode == 'default' or mode == 'psd':
Z = 10. * np.log10(spec)
else:
Z = 20. * np.log10(spec)
else:
raise ValueError('Unknown scale %s' % scale)
Z = np.flipud(Z)
if xextent is None:
xextent = 0, np.amax(t)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent, vmin=vmin, vmax=vmax,
**kwargs)
self.axis('auto')
return spec, freqs, t, im
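# Usage sketch for ``specgram`` (comment only; a hypothetical linear
# chirp makes the time-varying frequency content visible in the colormap):
#
#     t = np.arange(0, 3, 1 / 1000.0)
#     chirp = np.sin(2 * np.pi * (50 + 30 * t) * t)
#     fig, ax = plt.subplots()
#     spec, freqs, times, im = ax.specgram(chirp, NFFT=256, Fs=1000,
#                                          noverlap=128, mode='psd')
#     fig.colorbar(im, ax=ax)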
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', origin="upper", **kwargs):
"""
Plot the sparsity pattern on a 2-D array.
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
Parameters
----------
Z : sparse array (n, m)
The array to be plotted.
precision : float, optional, default: 0
If *precision* is 0, any non-zero value will be plotted; else,
values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a special
case: if *precision* is 'present', any value present in the array
will be plotted, even if it is identically zero.
origin : ["upper", "lower"], optional, default: "upper"
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
aspect : ['auto' | 'equal' | scalar], optional, default: "equal"
If 'equal', and `extent` is None, changes the axes aspect ratio to
match that of the image. If `extent` is not `None`, the axes
aspect ratio is changed to match that of the extent.
If 'auto', changes the image aspect ratio to match that of the
axes.
If None, default to rc ``image.aspect`` value.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
See also
--------
imshow : for image options.
plot : for plotting options
"""
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z) > precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc - 0.5, nr - 0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin=origin, **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z) > precision
y, x = np.nonzero(nonzero)
if marker is None:
marker = 's'
if markersize is None:
markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc - 0.5)
self.set_ylim(ymin=nr - 0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
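# Usage sketch for ``spy`` (comment only), showing both rendering styles
# described in the docstring:
#
#     Z = np.random.rand(40, 40)
#     Z[Z < 0.9] = 0                                # make Z mostly zero
#     fig, (ax1, ax2) = plt.subplots(1, 2)
#     img = ax1.spy(Z)                              # image style: AxesImage
#     line = ax2.spy(Z, marker='.', markersize=4)   # marker style: Line2D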
def matshow(self, Z, **kwargs):
"""
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed, with the first
row at the top. Row and column numbering is zero-based.
Parameters
----------
Z : array_like shape (n, m)
The matrix to be displayed.
Returns
-------
image : `~matplotlib.image.AxesImage`
Other parameters
----------------
kwargs : `~matplotlib.axes.Axes.imshow` arguments
Sets `origin` to 'upper', 'interpolation' to 'nearest' and
'aspect' to equal.
See also
--------
imshow : plot an image
Examples
--------
.. plot:: mpl_examples/pylab_examples/matshow.py
"""
Z = np.asanyarray(Z)
nr, nc = Z.shape
kw = {'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
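# Usage sketch for ``matshow`` (comment only):
#
#     A = np.diag(np.arange(5))
#     fig, ax = plt.subplots()
#     im = ax.matshow(A)
#     fig.colorbar(im, ax=ax)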
def violinplot(self, dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
points=100, bw_method=None):
"""Make a violin plot.
Call signature::
violinplot(dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
points=100, bw_method=None):
Make a violin plot for each column of *dataset* or each vector in
sequence *dataset*. Each filled area extends to represent the
entire data range, with optional lines at the mean, the median,
the minimum, and the maximum.
Parameters
----------
dataset : Array or a sequence of vectors.
The input data.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the violins. The ticks and limits are
automatically set to match the positions.
vert : bool, default = True.
If true, creates a vertical violin plot.
Otherwise, creates a horizontal violin plot.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the maximal width of
each violin. The default is 0.5, which uses about half of the
available horizontal space.
showmeans : bool, default = False
If `True`, will toggle rendering of the means.
showextrema : bool, default = True
If `True`, will toggle rendering of the extrema.
showmedians : bool, default = False
If `True`, will toggle rendering of the medians.
points : scalar, default = 100
The number of points at which each Gaussian kernel density
estimate is evaluated.
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as its only
parameter and return a scalar. If None (default), 'scott' is used.
Returns
-------
result : dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the
:class:`matplotlib.collections.PolyCollection` instances
containing the filled area of each violin.
- ``means``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the mean values of each of the
violin's distribution.
- ``mins``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the bottom of each violin's
distribution.
- ``maxes``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the top of each violin's
distribution.
- ``bars``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the centers of each violin's
distribution.
- ``medians``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the median values of each of the
violin's distribution.
"""
def _kde_method(X, coords):
kde = mlab.GaussianKDE(X, bw_method)
return kde.evaluate(coords)
vpstats = cbook.violin_stats(dataset, _kde_method, points=points)
return self.violin(vpstats, positions=positions, vert=vert,
widths=widths, showmeans=showmeans,
showextrema=showextrema, showmedians=showmedians)
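# Usage sketch for ``violinplot`` (comment only; three hypothetical
# normal samples with growing spread):
#
#     data = [np.random.normal(0, s, 200) for s in (1, 2, 3)]
#     fig, ax = plt.subplots()
#     parts = ax.violinplot(data, showmeans=True, showmedians=True)
#     for body in parts['bodies']:          # one PolyCollection per violin
#         body.set_facecolor('#8888ff')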
def violin(self, vpstats, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False):
"""Drawing function for violin plots.
Call signature::
violin(vpstats, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False):
Draw a violin plot for each entry in `vpstats`. Each filled area
extends to represent the entire data range, with optional lines at the
mean, the median, the minimum, and the maximum.
Parameters
----------
vpstats : list of dicts
A list of dictionaries containing stats for each violin plot.
Required keys are:
- ``coords``: A list of scalars containing the coordinates that
the violin's kernel density estimate was evaluated at.
- ``vals``: A list of scalars containing the values of the
kernel density estimate at each of the coordinates given
in *coords*.
- ``mean``: The mean value for this violin's dataset.
- ``median``: The median value for this violin's dataset.
- ``min``: The minimum value for this violin's dataset.
- ``max``: The maximum value for this violin's dataset.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the violins. The ticks and limits are
automatically set to match the positions.
vert : bool, default = True.
If true, plots the violins vertically.
Otherwise, plots the violins horizontally.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the maximal width of
each violin. The default is 0.5, which uses about half of the
available horizontal space.
showmeans : bool, default = False
If true, will toggle rendering of the means.
showextrema : bool, default = True
If true, will toggle rendering of the extrema.
showmedians : bool, default = False
If true, will toggle rendering of the medians.
Returns
-------
result : dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the
:class:`matplotlib.collections.PolyCollection` instances
containing the filled area of each violin.
- ``means``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the mean values of each of the
violin's distribution.
- ``mins``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the bottom of each violin's
distribution.
- ``maxes``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the top of each violin's
distribution.
- ``bars``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the centers of each violin's
distribution.
- ``medians``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the median values of each of the
violin's distribution.
"""
# Statistical quantities to be plotted on the violins
means = []
mins = []
maxes = []
medians = []
# Collections to be returned
artists = {}
N = len(vpstats)
datashape_message = ("List of violinplot statistics and `{0}` "
"values must have the same length")
# Validate positions
if positions is None:
positions = range(1, N + 1)
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
# Validate widths
if np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
# Calculate ranges for statistics lines
pmins = -0.25 * np.array(widths) + positions
pmaxes = 0.25 * np.array(widths) + positions
# Check whether we are rendering vertically or horizontally
if vert:
fill = self.fill_betweenx
perp_lines = self.hlines
par_lines = self.vlines
else:
fill = self.fill_between
perp_lines = self.vlines
par_lines = self.hlines
# Render violins
bodies = []
for stats, pos, width in zip(vpstats, positions, widths):
# The 0.5 factor reflects the fact that we plot from v-p to
# v+p
vals = np.array(stats['vals'])
vals = 0.5 * width * vals / vals.max()
bodies += [fill(stats['coords'],
-vals + pos,
vals + pos,
facecolor='y',
alpha=0.3)]
means.append(stats['mean'])
mins.append(stats['min'])
maxes.append(stats['max'])
medians.append(stats['median'])
artists['bodies'] = bodies
# Render means
if showmeans:
artists['cmeans'] = perp_lines(means, pmins, pmaxes, colors='r')
# Render extrema
if showextrema:
artists['cmaxes'] = perp_lines(maxes, pmins, pmaxes, colors='r')
artists['cmins'] = perp_lines(mins, pmins, pmaxes, colors='r')
artists['cbars'] = par_lines(positions, mins, maxes, colors='r')
# Render medians
if showmedians:
artists['cmedians'] = perp_lines(medians,
pmins,
pmaxes,
colors='r')
return artists
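# Usage sketch for ``violin`` with precomputed statistics (comment only;
# reuses ``data`` from the ``violinplot`` sketch). ``cbook.violin_stats``
# with a KDE callable is what ``violinplot`` above feeds into this method:
#
#     from matplotlib import cbook, mlab
#     def kde(X, coords):
#         return mlab.GaussianKDE(X).evaluate(coords)
#     stats = cbook.violin_stats(data, kde, points=50)
#     fig, ax = plt.subplots()
#     ax.violin(stats, positions=[1, 2, 3], showextrema=True)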
def tricontour(self, *args, **kwargs):
return mtri.tricontour(self, *args, **kwargs)
tricontour.__doc__ = mtri.TriContourSet.tricontour_doc
def tricontourf(self, *args, **kwargs):
return mtri.tricontourf(self, *args, **kwargs)
tricontourf.__doc__ = mtri.TriContourSet.tricontour_doc
def tripcolor(self, *args, **kwargs):
return mtri.tripcolor(self, *args, **kwargs)
tripcolor.__doc__ = mtri.tripcolor.__doc__
def triplot(self, *args, **kwargs):
return mtri.triplot(self, *args, **kwargs)
triplot.__doc__ = mtri.triplot.__doc__
| lgpl-3.0 |
shahbazn/neutron | neutron/tests/unit/plugins/embrane/test_embrane_neutron_plugin.py | 40 | 2774 | # Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_config import cfg
from neutron.plugins.embrane.common import config # noqa
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
PLUGIN_NAME = ('neutron.plugins.embrane.plugins.embrane_fake_plugin.'
'EmbraneFakePlugin')
class EmbranePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
def setUp(self):
cfg.CONF.set_override('admin_password', "admin123", 'heleos')
p = mock.patch.dict(sys.modules, {'heleosapi': mock.Mock()})
p.start()
# dict patches must be explicitly stopped
self.addCleanup(p.stop)
super(EmbranePluginV2TestCase, self).setUp(self._plugin_name)
class TestEmbraneBasicGet(test_plugin.TestBasicGet, EmbranePluginV2TestCase):
pass
class TestEmbraneV2HTTPResponse(test_plugin.TestV2HTTPResponse,
EmbranePluginV2TestCase):
pass
class TestEmbranePortsV2(test_plugin.TestPortsV2, EmbranePluginV2TestCase):
def test_create_ports_bulk_emulated_plugin_failure(self):
self.skip("Temporary skipping due to incompatibility with the"
" plugin dynamic class type")
def test_recycle_expired_previously_run_within_context(self):
self.skip("Temporary skipping due to incompatibility with the"
" plugin dynamic class type")
def test_recycle_held_ip_address(self):
self.skip("Temporary skipping due to incompatibility with the"
" plugin dynamic class type")
class TestEmbraneNetworksV2(test_plugin.TestNetworksV2,
EmbranePluginV2TestCase):
def test_create_networks_bulk_emulated_plugin_failure(self):
self.skip("Temporary skipping due to incompatibility with the"
" plugin dynamic class type")
class TestEmbraneSubnetsV2(test_plugin.TestSubnetsV2,
EmbranePluginV2TestCase):
def test_create_subnets_bulk_emulated_plugin_failure(self):
self.skip("Temporary skipping due to incompatibility with the"
" plugin dynamic class type")
| apache-2.0 |