repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
devonryder/dotfiles | polybar/spotify_status.py | 2 | 2152 | #!/bin/python
import sys
import dbus
import argparse

# Command-line options; each one is an optional override for the defaults
# defined further below.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--trunclen', type=int, metavar='trunclen')
parser.add_argument(
    '-f', '--format',
    type=str,
    metavar='custom format',
    dest='custom_format',
)
parser.add_argument(
    '-p', '--playpause',
    type=str,
    metavar='play-pause indicator',
    dest='play_pause',
)
args = parser.parse_args()
def fix_string(string):
    """Adapt *string* for printing under the running Python version.

    Python 3 handles unicode text natively; Python 2 needs an explicit
    UTF-8 encoding before the text is printed.
    """
    return string if sys.version_info.major == 3 else string.encode('utf-8')
# Default parameters
output = fix_string(u'{play_pause} {artist}: {song}')
trunclen = 25
# First character is the "play" glyph, second is "paused".
play_pause = fix_string(u'\u25B6,\u23F8')

# Defaults can be overridden from the command line.
if args.trunclen is not None:
    trunclen = args.trunclen
if args.custom_format is not None:
    output = args.custom_format
if args.play_pause is not None:
    play_pause = args.play_pause

try:
    # Query Spotify's playback state over the MPRIS D-Bus interface.
    session_bus = dbus.SessionBus()
    spotify_bus = session_bus.get_object(
        'org.mpris.MediaPlayer2.spotify',
        '/org/mpris/MediaPlayer2'
    )
    spotify_properties = dbus.Interface(
        spotify_bus,
        'org.freedesktop.DBus.Properties'
    )
    metadata = spotify_properties.Get('org.mpris.MediaPlayer2.Player', 'Metadata')
    status = spotify_properties.Get('org.mpris.MediaPlayer2.Player', 'PlaybackStatus')

    # Pick the indicator for the current playback state.  Guard against a
    # user-supplied --playpause value that lacks the comma separator, which
    # previously raised IndexError for the 'Paused' state.
    indicators = play_pause.split(',')
    if status == 'Playing':
        play_pause = indicators[0]
    elif status == 'Paused' and len(indicators) > 1:
        play_pause = indicators[1]
    else:
        play_pause = str()

    artist = fix_string(metadata['xesam:artist'][0])
    song = fix_string(metadata['xesam:title'])

    # Truncate long titles; if the cut left an unmatched '(' behind,
    # re-close it so the bar output looks balanced.
    if len(song) > trunclen:
        song = song[0:trunclen]
        song += '...'
        if ('(' in song) and (')' not in song):
            song += ')'

    print(output.format(artist=artist, song=song, play_pause=play_pause))
except Exception as e:
    # Spotify not running -> DBusException: print nothing so the polybar
    # module stays empty.  Anything else is surfaced for debugging.
    if isinstance(e, dbus.exceptions.DBusException):
        print('')
    else:
        print(e)
| unlicense |
EnceladOnline/interfaX | icon.py | 1 | 1967 | from tkinter import *
from tkinter import ttk
import func
class Icon:
    """A launcher icon on the current tab canvas.

    Renders either a text button (no image configured) or an image
    button, places it on the tab's canvas, and wires up launch /
    context-menu / title-display event handlers.
    """

    def __init__(self, main, icon):
        # Display the icon on the current tab.
        self.main = main
        self.master = self.main.cache["CurrentTabID"]
        self.icon = icon
        # icon[1][1] is the image path; fall back to a text button when
        # no image is configured.  (Was ``== None``; identity test is the
        # correct idiom for None.)
        if self.icon[1][1] is None:
            self.icon_label()
        else:
            self.icon_image()

    def icon_label(self):
        """Create a text-only button for this icon."""
        self.cadre = ttk.Button(self.main.cache["CurrentTabID"],
            text = self.icon[0], command = self.launch,
            style = "STYLE_B.TButton", takefocus = 0, cursor = "hand2")
        self.icon_tagorid = self.main.cache["CurrentTabID"].create_window(
            self.icon[2][0],
            self.icon[2][1], window = self.cadre, anchor = "se")
        self.main.cache["CurrentIconID"] = self.cadre
        self.main.cache["CurrentIcon"] = self.icon
        # Bind
        self.cadre.bind("<Button-3>", self.icon_menu_eventhandler)
        # Used in InterfaX 1
        # self.cadre.bind("<Motion>", self.icon_title_eventhandler)

    def icon_image(self):
        """Create an image button; degrade to an imageless button when the
        image file cannot be loaded."""
        try:
            self.main.cache[self.icon[0]] = PhotoImage(file = self.icon[1][1])
        except Exception:
            # Narrowed from a bare ``except:`` which would also swallow
            # KeyboardInterrupt / SystemExit.
            self.main.cache[self.icon[0]] = None
        self.cadre = ttk.Button(self.main.cache["CurrentTabID"],
            image = self.main.cache[self.icon[0]], takefocus = 0,
            command = self.launch, cursor = "hand2")
        self.icon_tagorid = self.main.cache["CurrentTabID"].create_window(
            self.icon[2][0],
            self.icon[2][1], window = self.cadre, anchor = "se")
        # Bind
        self.cadre.bind("<Button-3>", self.icon_menu_eventhandler)
        self.cadre.bind("<Motion>", self.icon_title_eventhandler)

    def launch(self):
        """Launch the application(s) associated with this icon."""
        path_list = self.icon[3]
        func.launcher(path_list)

    def icon_menu_eventhandler(self, event):
        """Right-click: mark this icon as current and open the icon menu."""
        self.main.cache["CurrentIconID"] = self.cadre
        self.main.cache["CurrentIcon"] = self.icon
        self.main.cache["CurrentIconTAGORID"] = self.icon_tagorid
        self.main.icon_menu_eventhandler()

    def icon_title_eventhandler(self, event):
        """Mouse motion: show this icon's title in the status variable."""
        self.main.strvar_icon_title.set(self.icon[0])
| gpl-2.0 |
xiangel/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/lexers/text.py | 56 | 57922 | # -*- coding: utf-8 -*-
"""
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt
from pygments.lexers.other import BashLexer
__all__ = ['IniLexer', 'SourcesListLexer', 'BaseMakefileLexer',
'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer']
class IniLexer(RegexLexer):
    """
    Lexer for configuration files in INI style.
    """

    name = 'INI'
    aliases = ['ini', 'cfg']
    filenames = ['*.ini', '*.cfg', '*.properties']
    mimetypes = ['text/x-ini']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'[;#].*?$', Comment),
            (r'\[.*?\]$', Keyword),
            (r'(.*?)([ \t]*)(=)([ \t]*)(.*?)$',
             bygroups(Name.Attribute, Text, Operator, Text, String)),
        ],
    }

    def analyse_text(text):
        # An INI file plausibly opens with a "[section]" header line.
        npos = text.find('\n')
        if npos < 3:
            return False
        return text[0] == '[' and text[npos-1] == ']'
class SourcesListLexer(RegexLexer):
    """
    Lexer that highlights debian sources.list files.

    *New in Pygments 0.7.*
    """

    name = 'Debian Sourcelist'
    aliases = ['sourceslist', 'sources.list']
    filenames = ['sources.list']
    mimetype = ['application/x-debian-sourceslist']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?$', Comment),
            (r'^(deb(?:-src)?)(\s+)',
             bygroups(Keyword, Text), 'distribution'),
        ],
        'distribution': [
            (r'#.*?$', Comment, '#pop'),
            (r'\$\(ARCH\)', Name.Variable),
            (r'[^\s$[]+', String),
            (r'\[', String.Other, 'escaped-distribution'),
            (r'\$', String),
            (r'\s+', Text, 'components'),
        ],
        'escaped-distribution': [
            (r'\]', String.Other, '#pop'),
            (r'\$\(ARCH\)', Name.Variable),
            (r'[^\]$]+', String.Other),
            (r'\$', String.Other),
        ],
        'components': [
            (r'#.*?$', Comment, '#pop:2'),
            (r'$', Text, '#pop:2'),
            (r'\s+', Text),
            (r'\S+', Keyword.Pseudo),
        ],
    }

    def analyse_text(text):
        # Every stripped line must be empty, a comment, or a deb/deb-src
        # entry for this to look like a sources.list file.
        return all(
            not line or line.startswith(('#', 'deb ', 'deb-src '))
            for line in (l.strip() for l in text.split('\n'))
        )
class MakefileLexer(Lexer):
    """
    Lexer for BSD and GNU make extensions (lenient enough to handle both in
    the same file even).

    *Rewritten in Pygments 0.10.*
    """

    name = 'Makefile'
    aliases = ['make', 'makefile', 'mf', 'bsdmake']
    filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
    mimetypes = ['text/x-makefile']

    r_special = re.compile(r'^(?:'
        # BSD Make
        r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
        # GNU Make
        r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
    r_comment = re.compile(r'^\s*@?#')

    def get_tokens_unprocessed(self, text):
        # Preprocessor-style and comment lines are collected as insertions;
        # everything else is lexed by BaseMakefileLexer and the insertions
        # are merged back in at their original offsets.
        ins = []
        done = ''
        lex = BaseMakefileLexer(**self.options)
        backslashflag = False
        for line in text.splitlines(True):
            if self.r_special.match(line) or backslashflag:
                ins.append((len(done), [(0, Comment.Preproc, line)]))
                # A trailing backslash continues the special line.
                backslashflag = line.strip().endswith('\\')
            elif self.r_comment.match(line):
                ins.append((len(done), [(0, Comment, line)]))
            else:
                done += line
        for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
            yield item
class BaseMakefileLexer(RegexLexer):
    """
    Lexer for simple Makefiles (no preprocessing).

    *New in Pygments 0.10.*
    """

    name = 'Makefile'
    aliases = ['basemake']
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
            (r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
            (r'\s+', Text),
            (r'#.*?\n', Comment),
            (r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
             bygroups(Keyword, Text), 'export'),
            (r'export\s+', Keyword),
            # assignment
            (r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n|.*\n)+)',
             bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
            # strings
            (r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
            # targets
            (r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
             'block-header'),
            # TODO: add paren handling (grr)
        ],
        'export': [
            (r'[a-zA-Z0-9_${}-]+', Name.Variable),
            (r'\n', Text, '#pop'),
            (r'\s+', Text),
        ],
        'block-header': [
            (r'[^,\\\n#]+', Number),
            (r',', Punctuation),
            (r'#.*?\n', Comment),
            (r'\\\n', Text),  # line continuation
            (r'\\.', Text),
            (r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
        ],
    }
class DiffLexer(RegexLexer):
    """
    Lexer for unified or context-style diffs or patches.
    """

    name = 'Diff'
    aliases = ['diff', 'udiff']
    filenames = ['*.diff', '*.patch']
    mimetypes = ['text/x-diff', 'text/x-patch']

    tokens = {
        'root': [
            (r' .*\n', Text),
            (r'\+.*\n', Generic.Inserted),
            (r'-.*\n', Generic.Deleted),
            (r'!.*\n', Generic.Strong),
            (r'@.*\n', Generic.Subheading),
            (r'([Ii]ndex|diff).*\n', Generic.Heading),
            (r'=.*\n', Generic.Heading),
            (r'.*\n', Text),
        ],
    }

    def analyse_text(text):
        # "Index:"/"diff" headers are unambiguous; a bare "--- " header is
        # merely very likely.
        if text.startswith('Index: '):
            return True
        if text.startswith('diff '):
            return True
        if text.startswith('--- '):
            return 0.9
# Verbs that may open a line inside a darcs patch body.
DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
                   'replace']


class DarcsPatchLexer(RegexLexer):
    """
    DarcsPatchLexer is a lexer for the various versions of the darcs patch
    format.  Examples of this format are derived by commands such as
    ``darcs annotate --patch`` and ``darcs send``.

    *New in Pygments 0.10.*
    """

    name = 'Darcs Patch'
    aliases = ['dpatch']
    filenames = ['*.dpatch', '*.darcspatch']

    tokens = {
        'root': [
            (r'<', Operator),
            (r'>', Operator),
            (r'{', Operator),
            (r'}', Operator),
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text, Operator)),
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text), 'comment'),
            (r'New patches:', Generic.Heading),
            (r'Context:', Generic.Heading),
            (r'Patch bundle hash:', Generic.Heading),
            (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
             bygroups(Text, Keyword, Text)),
            (r'\+', Generic.Inserted, "insert"),
            (r'-', Generic.Deleted, "delete"),
            (r'.*\n', Text),
        ],
        'comment': [
            (r'[^\]].*\n', Comment),
            (r'\]', Operator, "#pop"),
        ],
        'specialText': [  # darcs add [_CODE_] special operators for clarity
            (r'\n', Text, "#pop"),  # line-based
            (r'\[_[^_]*_]', Operator),
        ],
        'insert': [
            include('specialText'),
            (r'\[', Generic.Inserted),
            (r'[^\n\[]*', Generic.Inserted),
        ],
        'delete': [
            include('specialText'),
            (r'\[', Generic.Deleted),
            (r'[^\n\[]*', Generic.Deleted),
        ],
    }
class IrcLogsLexer(RegexLexer):
    """
    Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
    """

    name = 'IRC logs'
    aliases = ['irc']
    filenames = ['*.weechatlog']
    mimetypes = ['text/x-irclog']

    flags = re.VERBOSE | re.MULTILINE
    # Optional leading timestamp; re.VERBOSE lets the pattern carry its own
    # comments, so whitespace inside the pattern is not significant.
    timestamp = r"""
        (
          # irssi / xchat and others
          (?: \[|\()?                  # Opening bracket or paren for the timestamp
            (?:                        # Timestamp
                (?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
                 [T ])?                # Date/time separator: T or space
                (?: \d?\d [:.]?)+      # Time as :/.-separated groups of 1 or 2 digits
            )
          (?: \]|\))?\s+               # Closing bracket or paren for the timestamp
        |
          # weechat
          \d{4}\s\w{3}\s\d{2}\s        # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        |
          # xchat
          \w{3}\s\d{2}\s               # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        )?
    """
    tokens = {
        'root': [
            # log start/end
            (r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
            # hack
            ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
            # normal msgs
            ("^" + timestamp + r"""
                (\s*<.*?>\s*)          # Nick """,
             bygroups(Comment.Preproc, Name.Tag), 'msg'),
            # /me msgs
            ("^" + timestamp + r"""
                (\s*[*]\s+)            # Star
                ([^\s]+\s+.*?\n)       # Nick + rest of message """,
             bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
            # join/part msgs
            ("^" + timestamp + r"""
                (\s*(?:\*{3}|<?-[!@=P]?->?)\s*)  # Star(s) or symbols
                ([^\s]+\s+)                     # Nick + Space
                (.*?\n)                         # Rest of message """,
             bygroups(Comment.Preproc, Keyword, String, Comment)),
            (r"^.*?\n", Text),
        ],
        'msg': [
            (r"[^\s]+:(?!//)", Name.Attribute),  # Prefix
            (r".*\n", Text, '#pop'),
        ],
    }
class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.

    *New in Pygments 0.6.*
    """

    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']

    tokens = {
        'root': [
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }
class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.
    """

    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']

    tokens = {
        'general': [
            (r'%.*?\n', Comment),
            (r'[{}]', Name.Builtin),
            (r'[&_^]', Name.Builtin),
        ],
        'root': [
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            include('general'),
            (r'[^\\$%&_^{}]+', Text),
        ],
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            (r'\\\)', String, '#pop'),
            (r'\$', String, '#pop'),
            include('math'),
        ],
        'displaymath': [
            (r'\\\]', String, '#pop'),
            (r'\$\$', String, '#pop'),
            (r'\$', Name.Builtin),
            include('math'),
        ],
        'command': [
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            (r'', Text, '#pop'),
        ],
    }

    def analyse_text(text):
        # Common LaTeX/TeX preamble commands at the very start of the file.
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text.startswith(start):
                return True
class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.

    *New in Pygments 0.6.*
    """

    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1234567]', '*.man']
    mimetypes = ['application/x-troff', 'text/troff']

    tokens = {
        'root': [
            (r'(?i)(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]*', Text, 'textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),
            (r'\\[fn]\w', String.Escape),
            (r'\\\(..', String.Escape),
            (r'\\.\[.*\]', String.Escape),
            (r'\\.', String.Escape),
            (r'\\\n', Text, 'request'),
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }

    def analyse_text(text):
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            return True
        if text[:4] == '.TH ':
            return True
        # Length guard added: for a 3-character input such as ".ab" the
        # original ``text[3]`` access raised IndexError.
        if len(text) > 3 and text[1:3].isalnum() and text[3].isspace():
            return 0.9
class ApacheConfLexer(RegexLexer):
    """
    Lexer for configuration files following the Apache config file
    format.

    *New in Pygments 0.6.*
    """

    name = 'ApacheConf'
    aliases = ['apacheconf', 'aconf', 'apache']
    filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
    mimetypes = ['text/x-apacheconf']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#.*?)$', Comment),
            (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
             bygroups(Name.Tag, Text, String, Name.Tag)),
            (r'([a-zA-Z][a-zA-Z0-9]*)(\s+)',
             bygroups(Name.Builtin, Text), 'value'),
            (r'\.+', Text),
        ],
        'value': [
            (r'$', Text, '#pop'),
            (r'[^\S\n]+', Text),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'\d+', Number),
            (r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
            (r'(on|off|none|any|all|double|email|dns|min|minimal|'
             r'os|productonly|full|emerg|alert|crit|error|warn|'
             r'notice|info|debug|registry|script|inetd|standalone|'
             r'user|group)\b', Keyword),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'[^\s"]+', Text),
        ],
    }
class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.

    *New in Pygments 0.7.*
    """

    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)),  # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment),  # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-zivx]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword),  # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)),  # Link
            (r'^----+$', Keyword),  # Horizontal rules
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'}}}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'{{{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc),  # slurp boring text
            (r'.', Comment.Preproc),  # allow loose { or }
        ],
    }
class RstLexer(RegexLexer):
    """
    For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.

    *New in Pygments 0.7.*

    Additional options accepted:

    `handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: language`` and
        ``.. code:: language`` directives with a lexer for the given
        language (default: ``True``). *New in Pygments 0.8.*
    """

    name = 'reStructuredText'
    aliases = ['rst', 'rest', 'restructuredtext']
    filenames = ['*.rst', '*.rest']
    mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
    flags = re.MULTILINE

    def _handle_sourcecode(self, match):
        """Tokenize a sourcecode/code directive, delegating the indented
        body to the lexer named in the directive when possible."""
        from pygments.lexers import get_lexer_by_name
        from pygments.util import ClassNotFound

        # section header
        yield match.start(1), Punctuation, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator.Word, match.group(3)
        yield match.start(4), Punctuation, match.group(4)
        yield match.start(5), Text, match.group(5)
        yield match.start(6), Keyword, match.group(6)
        yield match.start(7), Text, match.group(7)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(6).strip())
            except ClassNotFound:
                pass
        indention = match.group(8)
        indention_size = len(indention)
        code = (indention + match.group(9) + match.group(10) + match.group(11))

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(8), String, code
            return

        # highlight the lines with the lexer, keeping each line's leading
        # indentation as a plain-text insertion.
        ins = []
        codelines = code.splitlines(True)
        code = ''
        for line in codelines:
            if len(line) > indention_size:
                ins.append((len(code), [(0, Text, line[:indention_size])]))
                code += line[indention_size:]
            else:
                code += line
        for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
            yield item

    tokens = {
        'root': [
            # Heading with overline
            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
             r'(.+)(\n)(\1)(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading,
                      Text, Generic.Heading, Text)),
            # Plain heading
            (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
             r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # Bulleted lists
            (r'^(\s*)([-*+])( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered lists
            (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered, but keep words at BOL from becoming lists
            (r'^(\s*)([A-Z]+\.)( .+\n(?:\1  .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1  .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Line blocks
            (r'^(\s*)(\|)( .+\n(?:\|  .+\n)*)',
             bygroups(Text, Operator, using(this, state='inline'))),
            # Sourcecode directives
            (r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
             r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
             _handle_sourcecode),
            # A directive
            (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
                      using(this, state='inline'))),
            # A reference target
            (r'^( *\.\.)(\s*)([\w\t ]+:)(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A footnote target
            (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A substitution def
            (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
                      Punctuation, Text, using(this, state='inline'))),
            # Comments
            (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
            # Field list
            (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
            (r'^( *)(:.*?:)([ \t]+)(.*?)$',
             bygroups(Text, Name.Class, Text, Name.Function)),
            # Definition list
            (r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
             bygroups(using(this, state='inline'), using(this, state='inline'))),
            # Code blocks
            (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
             bygroups(String.Escape, Text, String, String, Text, String)),
            include('inline'),
        ],
        'inline': [
            (r'\\.', Text),  # escape
            (r'``', String, 'literal'),  # code
            (r'(`.+?)(<.+?>)(`__?)',  # reference with inline target
             bygroups(String, String.Interpol, String)),
            (r'`.+?`__?', String),  # reference
            (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
             bygroups(Name.Variable, Name.Attribute)),  # role
            (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
             bygroups(Name.Attribute, Name.Variable)),  # role (content first)
            (r'\*\*.+?\*\*', Generic.Strong),  # Strong emphasis
            (r'\*.+?\*', Generic.Emph),  # Emphasis
            (r'\[.*?\]_', String),  # Footnote or citation
            (r'<.+?>', Name.Tag),  # Hyperlink
            (r'[^\\\n\[*`:]+', Text),
            (r'.', Text),
        ],
        'literal': [
            (r'[^`\\]+', String),
            (r'\\.', String),
            (r'``', String, '#pop'),
            (r'[`\\]', String),
        ],
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)

    def analyse_text(text):
        if text[:2] == '..' and text[2:3] != '.':
            return 0.3
        p1 = text.find("\n")
        p2 = text.find("\n", p1 + 1)
        if (p2 > -1 and              # has two lines
                p1 * 2 + 1 == p2 and  # they are the same length
                text[p1+1] in '-=' and  # the next line both starts and ends with
                text[p1+1] == text[p2-1]):  # ...a sufficiently high header
            return 0.5
class VimLexer(RegexLexer):
    """
    Lexer for VimL script files.

    *New in Pygments 0.8.*
    """

    name = 'VimL'
    aliases = ['vim']
    filenames = ['*.vim', '.vimrc']
    mimetypes = ['text/x-vim']
    flags = re.MULTILINE

    tokens = {
        'root': [
            # Who decided that doublequote was a good comment character??
            (r'^\s*".*', Comment),
            (r'(?<=\s)"[^\-:.%#=*].*', Comment),

            (r'[ \t]+', Text),
            # TODO: regexes can have other delims
            (r'/(\\\\|\\/|[^\n/])*/', String.Regex),
            (r'"(\\\\|\\"|[^\n"])*"', String.Double),
            (r"'(\\\\|\\'|[^\n'])*'", String.Single),
            (r'-?\d+', Number),
            (r'#[0-9a-f]{6}', Number.Hex),
            (r'^:', Punctuation),
            (r'[()<>+=!|,~-]', Punctuation),  # Inexact list.  Looks decent.
            (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
             Keyword),
            (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
            (r'\b\w+\b', Name.Other),  # These are postprocessed below
            (r'.', Text),
        ],
    }

    def __init__(self, **options):
        from pygments.lexers._vimbuiltins import command, option, auto
        self._cmd = command
        self._opt = option
        self._aut = auto
        RegexLexer.__init__(self, **options)

    def is_in(self, w, mapping):
        r"""
        It's kind of difficult to decide if something might be a keyword
        in VimL because it allows you to abbreviate them.  In fact,
        'ab[breviate]' is a good example.  :ab, :abbre, or :abbreviate are
        valid ways to call it so rather than making really awful regexps
        like::

            \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b

        we match `\b\w+\b` and then call is_in() on those tokens.  See
        `scripts/get_vimkw.py` for how the lists are extracted.
        """
        p = bisect(mapping, (w,))
        # Check the entry at or just before the insertion point: w must be
        # a valid abbreviation (prefix/extension match) of the keyword.
        if p > 0:
            if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
               mapping[p-1][1][:len(w)] == w:
                return True
        if p < len(mapping):
            return mapping[p][0] == w[:len(mapping[p][0])] and \
                   mapping[p][1][:len(w)] == w
        return False

    def get_tokens_unprocessed(self, text):
        # TODO: builtins are only subsequent tokens on lines
        #       and 'keywords' only happen at the beginning except
        #       for :au ones
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name.Other:
                if self.is_in(value, self._cmd):
                    yield index, Keyword, value
                elif self.is_in(value, self._opt) or \
                        self.is_in(value, self._aut):
                    yield index, Name.Builtin, value
                else:
                    yield index, Text, value
            else:
                yield index, token, value
class GettextLexer(RegexLexer):
    """
    Lexer for Gettext catalog files.

    *New in Pygments 0.9.*
    """

    name = 'Gettext Catalog'
    aliases = ['pot', 'po']
    filenames = ['*.pot', '*.po']
    mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']

    tokens = {
        'root': [
            (r'^#,\s.*?$', Keyword.Type),
            (r'^#:\s.*?$', Keyword.Declaration),
            #(r'^#$', Comment),
            (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
            (r'^(")([\w-]*:)(.*")$',
             bygroups(String, Name.Property, String)),
            (r'^".*"$', String),
            (r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
             bygroups(Name.Variable, Text, String)),
            (r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
             bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
        ],
    }
class SquidConfLexer(RegexLexer):
    """
    Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.

    *New in Pygments 0.9.*
    """

    name = 'SquidConf'
    aliases = ['squidconf', 'squid.conf', 'squid']
    filenames = ['squid.conf']
    mimetypes = ['text/x-squidconf']
    flags = re.IGNORECASE

    # Recognized squid.conf directive names.
    keywords = [
        "acl", "always_direct", "announce_host", "announce_period",
        "announce_port", "announce_to", "anonymize_headers",
        "append_domain", "as_whois_server", "auth_param_basic",
        "authenticate_children", "authenticate_program", "authenticate_ttl",
        "broken_posts", "buffered_logs", "cache_access_log",
        "cache_announce", "cache_dir", "cache_dns_program",
        "cache_effective_group", "cache_effective_user", "cache_host",
        "cache_host_acl", "cache_host_domain", "cache_log", "cache_mem",
        "cache_mem_high", "cache_mem_low", "cache_mgr", "cachemgr_passwd",
        "cache_peer", "cache_peer_access", "cahce_replacement_policy",
        "cache_stoplist", "cache_stoplist_pattern", "cache_store_log",
        "cache_swap", "cache_swap_high", "cache_swap_log", "cache_swap_low",
        "client_db", "client_lifetime", "client_netmask",
        "connect_timeout", "coredump_dir", "dead_peer_timeout",
        "debug_options", "delay_access", "delay_class",
        "delay_initial_bucket_level", "delay_parameters", "delay_pools",
        "deny_info", "dns_children", "dns_defnames", "dns_nameservers",
        "dns_testnames", "emulate_httpd_log", "err_html_text",
        "fake_user_agent", "firewall_ip", "forwarded_for",
        "forward_snmpd_port", "fqdncache_size", "ftpget_options",
        "ftpget_program", "ftp_list_width", "ftp_passive", "ftp_user",
        "half_closed_clients", "header_access", "header_replace",
        "hierarchy_stoplist", "high_response_time_warning",
        "high_page_fault_warning", "htcp_port", "http_access",
        "http_anonymizer", "httpd_accel", "httpd_accel_host",
        "httpd_accel_port", "httpd_accel_uses_host_header",
        "httpd_accel_with_proxy", "http_port", "http_reply_access",
        "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
        "ident_lookup", "ident_lookup_access", "ident_timeout",
        "incoming_http_average", "incoming_icp_average",
        "inside_firewall", "ipcache_high", "ipcache_low", "ipcache_size",
        "local_domain", "local_ip", "logfile_rotate", "log_fqdn",
        "log_icp_queries", "log_mime_hdrs", "maximum_object_size",
        "maximum_single_addr_tries", "mcast_groups",
        "mcast_icp_query_timeout", "mcast_miss_addr",
        "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
        "memory_pools_limit", "memory_replacement_policy", "mime_table",
        "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
        "minimum_object_size", "minimum_retry_timeout", "miss_access",
        "negative_dns_ttl", "negative_ttl", "neighbor_timeout",
        "neighbor_type_domain", "netdb_high", "netdb_low",
        "netdb_ping_period", "netdb_ping_rate", "never_direct",
        "no_cache", "passthrough_proxy", "pconn_timeout", "pid_filename",
        "pinger_program", "positive_dns_ttl", "prefer_direct",
        "proxy_auth", "proxy_auth_realm", "query_icmp", "quick_abort",
        "quick_abort", "quick_abort_max", "quick_abort_min",
        "quick_abort_pct", "range_offset_limit", "read_timeout",
        "redirect_children", "redirect_program",
        "redirect_rewrites_host_header", "reference_age",
        "reference_age", "refresh_pattern", "reload_into_ims",
        "request_body_max_size", "request_size", "request_timeout",
        "shutdown_lifetime", "single_parent_bypass",
        "siteselect_timeout", "snmp_access", "snmp_incoming_address",
        "snmp_port", "source_ping", "ssl_proxy",
        "store_avg_object_size", "store_objects_per_bucket",
        "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
        "tcp_incoming_address", "tcp_outgoing_address",
        "tcp_recv_bufsize", "test_reachability", "udp_hit_obj",
        "udp_hit_obj_size", "udp_incoming_address",
        "udp_outgoing_address", "unique_hostname", "unlinkd_program",
        "uri_whitespace", "useragent_log", "visible_hostname",
        "wais_relay", "wais_relay_host", "wais_relay_port",
    ]

    opts = [
        "proxy-only", "weight", "ttl", "no-query", "default",
        "round-robin", "multicast-responder", "on", "off", "all",
        "deny", "allow", "via", "parent", "no-digest", "heap", "lru",
        "realm", "children", "credentialsttl", "none", "disable",
        "offline_toggle", "diskd", "q1", "q2",
    ]

    actions = [
        "shutdown", "info", "parameter", "server_list",
        "client_list", r'squid\.conf',
    ]

    actions_stats = [
        "objects", "vm_objects", "utilization",
        "ipcache", "fqdncache", "dns", "redirector", "io",
        "reply_headers", "filedescriptors", "netdb",
    ]

    actions_log = ["status", "enable", "disable", "clear"]

    acls = [
        "url_regex", "urlpath_regex", "referer_regex", "port",
        "proto", "req_mime_type", "rep_mime_type", "method",
        "browser", "user", "src", "dst", "time", "dstdomain", "ident",
        "snmp_community",
    ]

    ip_re = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'

    def makelistre(words):
        # word-boundary alternation over a list of literals; the parameter
        # was renamed from ``list`` to avoid shadowing the builtin.
        return r'\b(?:'+'|'.join(words)+r')\b'

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#', Comment, 'comment'),
            (makelistre(keywords), Keyword),
            (makelistre(opts), Name.Constant),
            # Actions
            (makelistre(actions), String),
            (r'stats/'+makelistre(actions), String),
            (r'log/'+makelistre(actions)+r'=', String),
            (makelistre(acls), Keyword),
            (ip_re+r'(?:/(?:'+ip_re+r')|\d+)?', Number),
            (r'\b\d+\b', Number),
            (r'\S+', Text),
        ],
        'comment': [
            (r'\s*TAG:.*', String.Escape, '#pop'),
            (r'.*', Comment, '#pop'),
        ],
    }
class DebianControlLexer(RegexLexer):
    """
    Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.

    *New in Pygments 0.9.*
    """

    name = 'Debian Control file'
    aliases = ['control']
    filenames = ['control']

    tokens = {
        'root': [
            (r'^(Description)', Keyword, 'description'),
            (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
            (r'^((Build-)?Depends)', Keyword, 'depends'),
            (r'^((?:Python-)?Version)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^((?:Installed-)?Size)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^(MD5Sum|SHA1|SHA256)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
             bygroups(Keyword, Whitespace, String)),
        ],
        'maintainer': [
            (r'<[^>]+>', Generic.Strong),
            (r'<[^>]+>$', Generic.Strong, '#pop'),
            (r',\n?', Text),
            (r'.', Text),
        ],
        'description': [
            (r'(.*)(Homepage)(: )([^\s]+)', bygroups(Text, String, Name, Name.Class)),
            (r':.*\n', Generic.Strong),
            (r' .*\n', Text),
            ('', Text, '#pop'),
        ],
        'depends': [
            (r':\s*', Text),
            (r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
            (r'\(', Text, 'depend_vers'),
            (r',', Text),
            (r'\|', Operator),
            (r'[\s]+', Text),
            (r'[}\)]\s*$', Text, '#pop'),
            (r'[}]', Text),
            (r'[^,]$', Name.Function, '#pop'),
            (r'([\+\.a-zA-Z0-9-][\s\n]*)', Name.Function),
            (r'\[.*?\]', Name.Entity),
        ],
        'depend_vers': [
            (r'\),', Text, '#pop'),
            (r'\)[^,]', Text, '#pop:2'),
            (r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number)),
        ],
    }
class YamlLexerContext(LexerContext):
    """Indentation context for the YAML lexer."""

    def __init__(self, *args, **kwds):
        super(YamlLexerContext, self).__init__(*args, **kwds)
        # No indentation seen yet: the stack of enclosing levels is empty,
        # the current level is "none" (-1) and the next candidate level is
        # column zero.
        self.indent_stack = []
        self.indent, self.next_indent = -1, 0
        # Explicit indentation requested by a block scalar header, if any.
        self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
    """
    Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
    language.
    *New in Pygments 0.11.*
    """

    name = 'YAML'
    aliases = ['yaml']
    filenames = ['*.yaml', '*.yml']
    mimetypes = ['text/x-yaml']

    # The helpers below are callback *factories*: each returns a callback
    # with the ExtendedRegexLexer signature ``(lexer, match, context)`` that
    # yields ``(position, token, text)`` tuples and advances ``context.pos``.
    # The context is a YamlLexerContext carrying the indentation state.

    def something(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            text = match.group()
            if not text:
                return
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def reset_indent(token_class):
        """Reset the indentation levels."""
        def callback(lexer, match, context):
            text = match.group()
            context.indent_stack = []
            context.indent = -1
            context.next_indent = 0
            context.block_scalar_indent = None
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def save_indent(token_class, start=False):
        """Save a possible indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            extra = ''
            if start:
                context.next_indent = len(text)
                if context.next_indent < context.indent:
                    # Dedent: unwind the stack to the enclosing level.
                    while context.next_indent < context.indent:
                        context.indent = context.indent_stack.pop()
                    if context.next_indent > context.indent:
                        # Inconsistent indentation: emit the excess as Error.
                        extra = text[context.indent:]
                        text = text[:context.indent]
            else:
                context.next_indent += len(text)
            if text:
                yield match.start(), token_class, text
            if extra:
                yield match.start()+len(text), token_class.Error, extra
            context.pos = match.end()
        return callback

    def set_indent(token_class, implicit=False):
        """Set the previously saved indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            if context.indent < context.next_indent:
                context.indent_stack.append(context.indent)
                context.indent = context.next_indent
            if not implicit:
                context.next_indent += len(text)
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def set_block_scalar_indent(token_class):
        """Set an explicit indentation level for a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            context.block_scalar_indent = None
            if not text:
                return
            # Group 1 is the numeric indentation indicator, if given.
            increment = match.group(1)
            if increment:
                current_indent = max(context.indent, 0)
                increment = int(increment)
                context.block_scalar_indent = current_indent + increment
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def parse_block_scalar_empty_line(indent_token_class, content_token_class):
        """Process an empty line in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if (context.block_scalar_indent is None or
                    len(text) <= context.block_scalar_indent):
                if text:
                    yield match.start(), indent_token_class, text
            else:
                # Split the line into indentation and (whitespace) content.
                indentation = text[:context.block_scalar_indent]
                content = text[context.block_scalar_indent:]
                yield match.start(), indent_token_class, indentation
                yield (match.start()+context.block_scalar_indent,
                       content_token_class, content)
            context.pos = match.end()
        return callback

    def parse_block_scalar_indent(token_class):
        """Process indentation spaces in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if context.block_scalar_indent is None:
                # Not enough indentation: leave the block scalar states.
                if len(text) <= max(context.indent, 0):
                    context.stack.pop()
                    context.stack.pop()
                    return
                # First content line fixes the scalar's indentation.
                context.block_scalar_indent = len(text)
            else:
                if len(text) < context.block_scalar_indent:
                    context.stack.pop()
                    context.stack.pop()
                    return
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def parse_plain_scalar_indent(token_class):
        """Process indentation spaces in a plain scalar."""
        def callback(lexer, match, context):
            text = match.group()
            # A dedent ends the plain scalar: leave both scalar states.
            if len(text) <= context.indent:
                context.stack.pop()
                context.stack.pop()
                return
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    tokens = {
        # the root rules
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
             ('block-line', 'indentation')),
        ],

        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # line break
            (r'\n', Text, '#pop:2'),
        ],

        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(Text, Number), 'ignored-line'),
        ],

        # the %TAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![0-9A-Za-z_-]*!)'
             r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
             bygroups(Text, Keyword.Type, Text, Keyword.Type),
             'ignored-line'),
        ],

        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning a block line
            (r'[ ]*', save_indent(Text), '#pop'),
        ],

        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', something(Text), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', Text),
            # tags, anchors and aliases,
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
             something(Name.Variable),
             'plain-scalar-in-block-context'),
        ],

        # tags, anchors, aliases
        'descriptors' : [
            # a full-form tag
            (r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[0-9A-Za-z_-]+)?'
             r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
            # an anchor
            (r'&[0-9A-Za-z_-]+', Name.Label),
            # an alias
            (r'\*[0-9A-Za-z_-]+', Name.Variable),
        ],

        # block collections and scalars
        'block-nodes': [
            # implicit key
            (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],

        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', String, 'double-quoted-scalar'),
        ],

        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # simple indicators
            (r'[?:,]', Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
             something(Name.Variable),
             'plain-scalar-in-flow-context'),
        ],

        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', Punctuation.Indicator, '#pop'),
        ],

        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', Punctuation.Indicator, '#pop'),
        ],

        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', Text),
            # empty line
            (r'^[ ]+$',
             parse_block_scalar_empty_line(Text, Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', parse_block_scalar_indent(Text)),
            # line content
            (r'[^\n\r\f\v]+', Name.Constant),
        ],

        # the content of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
        ],

        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+|[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
        ],

        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', String.Escape),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v\']+', String),
            # the closing quote
            (r'\'', String, '#pop'),
        ],

        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             String.Escape),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v\"\\]+', String),
            # the closing quote
            (r'"', String, '#pop'),
        ],

        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', Text),
            # line breaks
            (r'\n+', Text),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
        ],

        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
        ],

        # a plain scalar is the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+|[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
        ],

    }

    def get_tokens_unprocessed(self, text=None, context=None):
        # Use a YAML-specific context so the indentation-tracking callbacks
        # above have somewhere to keep their state.
        if context is None:
            context = YamlLexerContext(text, 0)
        return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class LighttpdConfLexer(RegexLexer):
    """
    Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
    *New in Pygments 0.11.*
    """
    name = 'Lighttpd configuration file'
    aliases = ['lighty', 'lighttpd']
    filenames = []
    mimetypes = ['text/x-lighttpd-conf']

    tokens = {
        'root': [
            (r'#.*\n', Comment.Single),
            (r'/\S*', Name), # pathname
            # Bare words (option names) are treated as keywords.
            (r'[a-zA-Z._-]+', Keyword),
            # IPv4 address with an optional /prefix length.
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'[0-9]+', Number),
            (r'=>|=~|\+=|==|=|\+', Operator),
            # Builtin variables such as $HTTP.
            (r'\$[A-Z]+', Name.Builtin),
            (r'[(){}\[\],]', Punctuation),
            # Double-quoted string with backslash escapes.
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'\s+', Text),
        ],

    }
class NginxConfLexer(RegexLexer):
    """
    Lexer for `Nginx <http://nginx.net/>`_ configuration files.
    *New in Pygments 0.11.*
    """
    name = 'Nginx configuration file'
    aliases = ['nginx']
    filenames = []
    mimetypes = ['text/x-nginx-conf']

    tokens = {
        'root': [
            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
            # Any directive name starts a statement.
            (r'[^\s;#]+', Keyword, 'stmt'),
            include('base'),
        ],
        'block': [
            # '}' closes both the 'block' and the enclosing 'stmt' state.
            (r'}', Punctuation, '#pop:2'),
            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
            include('base'),
        ],
        'stmt': [
            (r'{', Punctuation, 'block'),
            (r';', Punctuation, '#pop'),
            include('base'),
        ],
        'base': [
            (r'#.*\n', Comment.Single),
            (r'on|off', Name.Constant),
            # Variables such as $remote_addr.
            (r'\$[^\s;#()]+', Name.Variable),
            # host:port pairs.
            (r'([a-z0-9.-]+)(:)([0-9]+)',
             bygroups(Name, Punctuation, Number.Integer)),
            (r'[a-z-]+/[a-z-+]+', String), # mimetype
            #(r'[a-zA-Z._-]+', Keyword),
            (r'[0-9]+[km]?\b', Number.Integer),
            (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
            (r'[:=~]', Punctuation),
            (r'[^\s;#{}$]+', String), # catch all
            # NOTE(review): the catch-all above also matches "/..." text,
            # so this pathname rule appears unreachable -- confirm intent.
            (r'/[^\s;#]*', Name), # pathname
            (r'\s+', Text),
            (r'[$;]', Text), # leftover characters
        ],
    }
class CMakeLexer(RegexLexer):
    """
    Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
    *New in Pygments 1.2.*
    """
    name = 'CMake'
    aliases = ['cmake']
    filenames = ['*.cmake']
    mimetypes = ['text/x-cmake']

    tokens = {
        'root': [
            # Historical explicit command list, superseded by the generic
            # "identifier followed by '(' " rule below.
            #(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
            # r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
            # r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
            # r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
            # r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
            # r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
            # r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
            # r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
            # r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
            # r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
            # r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
            # r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
            # r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
            # r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
            # r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
            # r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
            # r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
            # r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
            # r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
            # r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
            # r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
            # r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
            # r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
            # r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
            # r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
            # r'COUNTARGS)\b', Name.Builtin, 'args'),
            # Any identifier followed by '(' is a command invocation; its
            # arguments are lexed in the 'args' state.
            (r'\b([A-Za-z_]+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
                                                     Punctuation), 'args'),
            include('keywords'),
            include('ws')
        ],
        'args': [
            # Nested parentheses are tracked via push/pop.
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop'),
            # Variable references like ${FOO}.
            (r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
            (r'(?s)".*?"', String.Double),
            (r'\\\S+', String),
            (r'[^\)$"# \t\n]+', String),
            (r'\n', Text), # explicitly legal
            include('keywords'),
            include('ws')
        ],
        'string': [

        ],
        'keywords': [
            # Platform/compiler condition variables.
            (r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
             r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
        ],
        'ws': [
            (r'[ \t]+', Text),
            (r'#.+\n', Comment),
        ]
    }
| apache-2.0 |
wilkinsg/piweb | watched.py | 1 | 2615 | #!/usr/bin/python
import hash
import os
import config
import video_info
watched_cache = {}
def prepwatched( conn ):
global watched_cache
result = conn.execute( "SELECT * FROM history" )
queueitem = result.fetchone()
while( queueitem ):
watched_cache[ queueitem[ 0 ] ] = True
queueitem = result.fetchone()
# def is_list_watched( hashlist, conn ):
# orlist = ( '?,' * len( hashlist ) ).rstrip( ',' )
# result = conn.execute( "SELECT * FROM history WHERE hash in ({})".format( orlist ), tuple( hashlist ) )
# if( result.rowcount() == len( hashlist ) ):
# return( True )
# else:
# return( False )
def is_watched(hash, conn):
    """Return True if *hash* is recorded as watched.

    Consults the in-memory cache first and falls back to the ``history``
    table, caching whatever the database says.
    """
    global watched_cache
    if hash in watched_cache:
        return watched_cache[hash]
    cursor = conn.execute("SELECT * FROM history WHERE hash = ?", (hash,))
    seen = cursor.fetchone() is not None
    watched_cache[hash] = seen
    return seen
def is_directory_watched(dir, conn):
    """Return True when every video file under *dir* has been watched."""
    base = os.path.join(config.get_media_dir(), dir.lstrip('/'))
    for root, dirs, files in os.walk(base):
        for filename in files:
            if not video_info.is_video(filename):
                continue
            path = os.path.join(root, filename)
            # One unwatched video is enough to answer "no".
            if not is_watched(hash.hash_name(path), conn):
                return False
    return True
def mark_all_watched(list, conn):
    """Record every filename in *list* as watched, then commit once.

    The parameter name ``list`` is kept for API compatibility even though
    it shadows the builtin.
    """
    global watched_cache
    for filename in list:
        digest = hash.hash_name(filename)
        # Only well-formed (32-char) hashes not already recorded are inserted.
        if digest and len(digest) == 32 and not is_watched(digest, conn):
            conn.execute("INSERT INTO history VALUES( ? )", (digest,))
            watched_cache[digest] = True
    conn.commit()
def mark_hash_watched(input, conn, docommit=True):
    """Record *input* (a 32-char hash) as watched.

    Returns True when a row was inserted, False for invalid or
    already-watched hashes.  Set ``docommit=False`` to batch commits.
    """
    global watched_cache
    # Guard clause: reject malformed hashes and ones already recorded.
    if not input or len(input) != 32 or is_watched(input, conn):
        return False
    conn.execute("INSERT INTO history VALUES( ? )", (input,))
    watched_cache[input] = True
    if docommit:
        conn.commit()
    return True
def mark_hash_unwatched(input, conn):
    """Remove *input* from the watched history.

    Returns True when *input* was a well-formed 32-char hash (whether or
    not a row existed), False otherwise.
    """
    global watched_cache
    if not input or len(input) != 32:
        return False
    conn.execute("DELETE FROM history WHERE hash=?", (input,))
    watched_cache[input] = False
    conn.commit()
    return True
def mark_watched(filename, conn):
    """Hash *filename* and record that hash as watched (commits at once)."""
    mark_hash_watched(hash.hash_name(filename), conn)
| mit |
yangl1996/libpagure | tests/test_api.py | 1 | 12568 | import pytest
from libpagure import Pagure
# A single shared Pagure client is enough for the read-only URL tests below.
@pytest.fixture(scope='module')
def simple_pg():
    """ Create a simple Pagure object
    to be used in test
    """
    pg = Pagure(pagure_repository="testrepo")
    return pg


def test_pagure_object():
    """ Test the pagure object creation """
    pg = Pagure(pagure_token="a token",
                pagure_repository="test_repo")
    # A supplied token is stored and baked into the auth header.
    assert pg.token == "a token"
    assert pg.repo == "test_repo"
    # Everything not passed explicitly falls back to its default.
    assert pg.namespace is None
    assert pg.username is None
    assert pg.instance == "https://pagure.io"
    assert pg.insecure is False
    assert pg.header == {"Authorization": "token a token"}


# (fork user, namespace, repo, expected API base URL) combinations.
basic_url_data = [
    (None, None, 'testrepo', 'https://pagure.io/api/0/testrepo/'),
    (None, 'testnamespace', 'testrepo',
     'https://pagure.io/api/0/testnamespace/testrepo/'),
    ('testfork', None, 'testrepo',
     'https://pagure.io/api/0/fork/testfork/testrepo/'),
    ('testfork', 'testnamespace', 'testrepo',
     'https://pagure.io/api/0/fork/testfork/testnamespace/testrepo/'),
]


@pytest.mark.parametrize("user, namespace, repo, expected",
                         basic_url_data)
def test_create_basic_url(user, namespace, repo, expected):
    """ Test creation of url in function of argument
    passed to the Pagure class.
    """
    pg = Pagure(pagure_repository=repo,
                fork_username=user,
                namespace=namespace)
    url = pg.create_basic_url()
    assert url == expected
# The tests below mock Pagure._call_api and only verify that each public
# method hits the expected endpoint with the expected parameters.

def test_api_version(mocker, simple_pg):
    """ Test the call to the version API """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.api_version()
    Pagure._call_api.assert_called_once_with('https://pagure.io/api/0/version')


def test_list_users(mocker, simple_pg):
    """ Test the call to the users API """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.list_users(pattern='c')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/users', params={'pattern': 'c'})


def test_list_tags(mocker, simple_pg):
    """ Test the call to the tags API """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.list_tags(pattern='easy')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/tags', params={'pattern': 'easy'})


def test_list_groups(mocker, simple_pg):
    """ Test the call to the groups API """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.list_groups()
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/groups', params=None)


def test_error_codes(mocker, simple_pg):
    """ Test the call to the error codes API """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.error_codes()
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/error_codes')


# (status, assignee, author, expected query params) for list_requests.
pr_data = [
    ('teststatus', 'testassignee', 'testauthor',
     {'status': 'teststatus', 'assignee': 'testassignee', 'author': 'testauthor'}),
    (None, None, None, {})
]


@pytest.mark.parametrize("status, assignee, author, expected", pr_data)
def test_list_requests(mocker, simple_pg, status, assignee, author, expected):
    """ Test the API call to the pull-requests endpoint """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.list_requests(status, assignee, author)
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/pull-requests', params=expected)


def test_request_info(mocker, simple_pg):
    """ Test the API call to get pull-request info """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.request_info('123')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/pull-request/123')


def test_merge_request(mocker, simple_pg):
    """ Test the API call to merge a pull-request """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.merge_request('123')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/pull-request/123/merge', method='POST')


def test_close_request(mocker, simple_pg):
    """ Test the API call to close a pull-request """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.close_request('123')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/pull-request/123/close', method='POST')


# (body, commit, filename, row, expected POST payload) for comment_request.
comment_data = [
    ("test body", None, None, None, {'comment': 'test body'}),
    ("test body", "testcommit", "testfilename", "testrow",
     {'comment': 'test body', 'commit': 'testcommit', 'filename': 'testfilename',
      'row': 'testrow'})
]


@pytest.mark.parametrize("body, commit, filename, row, expected", comment_data)
def test_comment_request(mocker, simple_pg, body, commit, filename, row, expected):
    """ Test the API call to comment on a pull-request """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.comment_request('123', body, commit, filename, row)
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/pull-request/123/comment', method='POST',
        data=expected)


# Flag payloads with and without the optional uid/commit fields.
flag_data = [
    ('testuser', 'testpercent', 'testcomment', 'testurl', None, None,
     {'username': 'testuser', 'percent': 'testpercent', 'comment': 'testcomment',
      'url': 'testurl'}),
    ('testuser', 'testpercent', 'testcomment', 'testurl', 'testuid', 'testcommit',
     {'username': 'testuser', 'percent': 'testpercent', 'comment': 'testcomment',
      'url': 'testurl', 'uid': 'testuid', 'commit': 'testcommit'})
]


@pytest.mark.parametrize("username, percent, comment, url, uid, commit, expected",
                         flag_data)
def test_flag_request(mocker, simple_pg, username, percent, comment, url, uid,
                      commit, expected):
    """ Test the API call to flag a pull-request """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.flag_request('123', username, percent, comment, url, uid, commit)
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/pull-request/123/flag', method='POST',
        data=expected)
# Issue-, project- and user-level endpoints; same mock-and-verify pattern.

def test_create_issue(mocker, simple_pg):
    """ Test the API call to create an issue """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.create_issue('A test issue', 'Some issue content', True)
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/new_issue', method='POST',
        data={'title': 'A test issue', 'issue_content': 'Some issue content',
              'priority': True})


def test_list_issues(mocker, simple_pg):
    """ Test the API call to list all issues of a project """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.list_issues('status', 'tags', 'assignee', 'author',
                          'milestones', 'priority', 'no_stones', 'since')
    # Every filter argument is forwarded verbatim as a query parameter.
    expected = {'status': 'status', 'tags': 'tags', 'assignee': 'assignee',
                'author': 'author', 'milestones': 'milestones', 'priority': 'priority',
                'no_stones': 'no_stones', 'since': 'since'}
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/issues', params=expected)


def test_issue_info(mocker, simple_pg):
    """ Test the API call to info about a project issue """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.issue_info('123')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/issue/123')


def test_list_comment(mocker, simple_pg):
    """ Test the API call to info about a project issue """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.get_list_comment('123', '001')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/issue/123/comment/001')


def test_change_issue_status(mocker, simple_pg):
    """ Test the API call to change the status of a project issue """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.change_issue_status('123', 'Closed', 'wontfix')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/issue/123/status', method='POST',
        data={'status': 'Closed', 'close_status': 'wontfix'})


def test_change_issue_milestone(mocker, simple_pg):
    """ Test the API call to change the milestone of a project issue """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.change_issue_milestone('123', 'Tomorrow')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/issue/123/milestone', method='POST',
        data={'milestone': 'Tomorrow'})


def test_comment_issue(mocker, simple_pg):
    """ Test the API call to change the milestone of a project issue """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.comment_issue('123', 'A comment')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/issue/123/comment', method='POST',
        data={'comment': 'A comment'})


def test_project_tags(mocker, simple_pg):
    """ Test the API call to get a project tags """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.project_tags()
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/git/tags')


def test_list_projects(mocker, simple_pg):
    """ Test the API call to list all projects on a pagure instance """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.list_projects('tags', 'pattern', 'username', 'owner',
                            'namespace', 'fork', 'short', 1, 100)
    # Note: page/per_page are stringified by the client.
    expected = {'tags': 'tags', 'pattern': 'pattern', 'username': 'username',
                'owner': 'owner', 'namespace': 'namespace', 'fork': 'fork',
                'short': 'short', 'page': '1', 'per_page': '100'}
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/projects', params=expected)


def test_user_info(mocker, simple_pg):
    """ Test the API call to get info about a user """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.user_info('auser')
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/user/auser')


def test_new_project(mocker, simple_pg):
    """ Test the API call to list all projects on a pagure instance """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.new_project('name', 'description', 'namespace', 'url',
                          'avatar_email', True, True)
    expected = {'name': 'name', 'description': 'description', 'namespace': 'namespace',
                'url': 'url', 'avatar_email': 'avatar_email',
                'create_readme': True, 'private': True}
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/new', data=expected, method='POST')


def test_project_branches(mocker, simple_pg):
    """ Test the API call to get info about a user """
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.project_branches()
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/testrepo/git/branches')


def test_user_activity_stats(mocker, simple_pg):
    """ Test the API call to get stats about a user activity"""
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.user_activity_stats('auser')
    expected = {'username': 'auser'}
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/user/auser/activity/stats', params=expected)


def test_user_activity_stats_by_date(mocker, simple_pg):
    """ Test the API call to get stats about a user activity by specific date"""
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.user_activity_stats_by_date('auser',"2017-12-30")
    expected = {'username': 'auser', 'date' : '2017-12-30'}
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/user/auser/activity/2017-12-30', params=expected)


def test_list_pull_requests(mocker, simple_pg):
    """ Test the API call to get stats about a user's pull requests"""
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.list_pull_requests('auser', 1)
    expected = {'username': 'auser', 'page': 1}
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/user/auser/requests/filed', params=expected)


def test_list_prs_actionable_by_user(mocker, simple_pg):
    """ Test the API call to list PR's actionable for a given user"""
    mocker.patch('libpagure.Pagure._call_api')
    simple_pg.list_prs_actionable_by_user('auser', 1)
    expected = {'username': 'auser', 'page': 1}
    Pagure._call_api.assert_called_once_with(
        'https://pagure.io/api/0/user/auser/requests/actionable', params=expected)
| gpl-2.0 |
mikehowson/foursquared.eclair | util/common.py | 262 | 2820 | #!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Java type names used when mapping XML attribute declarations.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursquareTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]

CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}

# Nested/complex types whose subtrees are skipped during the walk.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]
TYPES = COMPLEX + ['boolean']


def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    <venue>
      <attribute>value</attribute>
    </venue>

    Args:
      path: filename or file-like object accepted by xml.dom.pulldom.parse.

    Returns:
      type_name - The java-style name the top node will have. "Venue"
      top_node_name - unadultured name of the xml stanza, probably the type of
          java class we're creating. "venue"
      attributes - {'attribute': ('type', ['child'])}
    """
    doc = pulldom.parse(path)

    type_name = None
    top_node_name = None
    attributes = {}

    level = 0  # >0 while skipping the subtree of a complex-typed node
    for event, node in doc:
        # For skipping parts of a tree.
        if level > 0:
            if event == pulldom.END_ELEMENT:
                level -= 1
                # logging.warn is deprecated; use logging.warning.
                logging.warning('(%s) Skip end: %s' % (str(level), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warning('(%s) Skipping: %s' % (str(level), node))
                level += 1
                continue

        if event == pulldom.START_ELEMENT:
            logging.warning('Parsing: ' + node.tagName)
            # Get the type name to use; the first element is the top node.
            if type_name is None:
                type_name = ''.join([word.capitalize()
                                     for word in node.tagName.split('_')])
                top_node_name = node.tagName
                logging.warning('Found Top Node Name: ' + top_node_name)
                continue

            typ = node.getAttribute('type')
            child = node.getAttribute('child')
            # We don't want to walk complex types.
            if typ in COMPLEX:
                logging.warning('Found Complex: ' + node.tagName)
                level = 1
            elif typ not in TYPES:
                # Unknown types default to String.
                logging.warning('Found String: ' + typ)
                typ = STRING
            else:
                logging.warning('Found Type: ' + typ)
            logging.warning('Adding: ' + str((node, typ)))
            attributes.setdefault(node.tagName, (typ, [child]))

    logging.warning('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| apache-2.0 |
Hazelsuko07/17WarmingUp | py3.6/lib/python3.6/site-packages/dominate/util.py | 25 | 3902 | '''
Utility classes for creating dynamic html documents
'''
__license__ = '''
This file is part of Dominate.
Dominate is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Dominate is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with Dominate. If not, see
<http://www.gnu.org/licenses/>.
'''
import re
from .dom_tag import dom_tag
# Python 2/3 compatibility shim: on Python 3, alias `basestring` to `str`
# and `unichr` to `chr` so the rest of the module can use the Python 2
# names unconditionally.
try:
    basestring = basestring
except NameError: # py3
    basestring = str
    unichr = chr
def include(f):
    '''
    Includes the contents of a file on disk, unescaped.

    f: path of the file to read.
    Returns a ``raw`` node wrapping the file's contents.
    '''
    # Context manager guarantees the handle is closed even if read() raises
    # (the original open()/close() pair leaked the handle on error).
    with open(f, 'r') as fl:
        data = fl.read()
    return raw(data)
def system(cmd, data=None):
    '''
    Runs *cmd* through the shell and returns its standard output decoded
    as UTF-8.  *data* (bytes), if given, is written to the child's stdin.
    '''
    import subprocess
    proc = subprocess.Popen(
        cmd, shell=True,
        stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    stdout, _ = proc.communicate(data)
    return stdout.decode('utf8')
def escape(data, quote=True):  # adapted from the std lib cgi module
    '''
    Escapes special characters into their html entities.

    Replace special characters "&", "<" and ">" with HTML-safe sequences.
    If the optional flag quote is true, the quotation mark character (")
    is also translated.

    This is used to escape content that appears in the body of an HTML
    document.
    '''
    # The source had been corrupted by HTML-entity decoding (the replaces
    # were no-ops); restored to the standard cgi.escape behaviour.
    data = data.replace("&", "&amp;")  # Must be done first!
    data = data.replace("<", "&lt;")
    data = data.replace(">", "&gt;")
    if quote:
        data = data.replace('"', "&quot;")
    return data
# Map of (a few) named HTML entities to Unicode code points, used by
# unescape() below.  Unknown entity names fall back to "?".
_unescape = {
    'quot': 34,
    'amp': 38,
    'lt': 60,
    'gt': 62,
    # NOTE(review): 'nbsp' maps to 32 (plain space), not U+00A0 (160) --
    # presumably deliberate whitespace normalisation, but worth confirming.
    'nbsp': 32,
    # more here
    # http://www.w3.org/TR/html4/sgml/entities.html
    'yuml': 255,
}

# Alias so ``escape`` stays reachable where the name is shadowed locally
# (e.g. by the ``escape`` keyword argument of the ``text`` class below).
str_escape = escape
def unescape(data):
    '''
    Unescapes html entities (named and numeric); the opposite of escape.

    Numeric entities ("&#65;") decode to their code point; named entities
    are looked up in ``_unescape`` and unknown names degrade to "?".
    '''
    # Raw string: the original non-raw '\d' is an invalid escape sequence
    # (DeprecationWarning, and a SyntaxWarning on modern CPython).
    cc = re.compile(r'&(?:(?:#(\d+))|([^;]+));')

    def _replace(m):
        if m.group(1):
            # Numeric character reference.
            return unichr(int(m.group(1)))
        # Named entity; unknown names fall back to '?'.
        return unichr(_unescape.get(m.group(2), ord('?')))

    # re.sub with a callable is equivalent to the original manual
    # search/slice loop, without the repeated string copies.
    return cc.sub(_replace, data)
_reserved = ";/?:@&=+$, "
_replace_map = dict((c, '%%%2X' % ord(c)) for c in _reserved)
def url_escape(data):
return ''.join(_replace_map.get(c, c) for c in data)
def url_unescape(data):
    '''Decode "%XX" percent-escapes in *data* back into characters.'''
    def _decode(match):
        return unichr(int(match.group(1), 16))
    return re.sub('%([0-9a-fA-F]{2})', _decode, data)
class lazy(dom_tag):
    '''
    Delays calling a function until the tag is rendered.
    '''
    def __new__(_cls, *args, **kwargs):
        '''
        dom_tag.__new__ inspects its arguments to decide whether the class
        is being used as a decorator; bypass that here, since lazy always
        wraps an explicit callable and is never used as a decorator.
        '''
        return object.__new__(_cls)

    def __init__(self, func, *args, **kwargs):
        super(lazy, self).__init__()
        # Stash the callable and its arguments for render time.
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def _render(self, sb, *a, **kw):
        # Evaluate only now, when the document is actually rendered.
        sb.append(str(self.func(*self.args, **self.kwargs)))
# TODO rename this to raw?
class text(dom_tag):
    '''
    A plain string node.  Useful inside context managers.
    '''
    is_pretty = False
    is_inline = True

    def __init__(self, _text, escape=True):
        super(text, self).__init__()
        # Escape by default so markup inside _text is displayed, not parsed.
        self.text = str_escape(_text) if escape else _text

    def _render(self, sb, *a, **kw):
        sb.append(self.text)
        return sb
def raw(s):
    '''
    Inserts a raw (unescaped) string into the DOM.

    Unsafe with untrusted input: the string is emitted verbatim.
    '''
    return text(s, escape=False)
| mit |
leedm777/ansible-modules-core | cloud/openstack/os_object.py | 130 | 4074 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_object
short_description: Create or Delete objects and containers from OpenStack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
description:
- Create or Delete objects and containers from OpenStack
options:
container:
description:
- The name of the container in which to create the object
required: true
name:
description:
- Name to be give to the object. If omitted, operations will be on
the entire container
required: false
file:
description:
- Path to local file to be uploaded.
required: false
container_access:
description:
- desired container access level.
required: false
choices: ['private', 'public']
default: private
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Creates a object named 'fstab' in the 'config' container
- os_object: cloud=mordred state=present name=fstab container=config file=/etc/fstab
# Deletes a container called config and all of its contents
- os_object: cloud=rax-iad state=absent container=config
'''
def process_object(
        cloud_obj, container, name, filename, container_access, **kwargs):
    """Ensure an object-store container/object matches the requested state.

    Args:
        cloud_obj: a shade OpenStackCloud connection.
        container: name of the container to operate on.
        name: object name, or None to operate on the container only.
        filename: local path to upload when (re)creating the object.
        container_access: desired access level ('private' or 'public').
        **kwargs: remaining module params; only 'state' is consulted.

    Returns:
        True if anything was created, updated or deleted.
    """
    changed = False
    container_obj = cloud_obj.get_container(container)
    if kwargs['state'] == 'present':
        # Create the container if missing, then reconcile its access level.
        if not container_obj:
            container_obj = cloud_obj.create_container(container)
            changed = True
        if cloud_obj.get_container_access(container) != container_access:
            cloud_obj.set_container_access(container, container_access)
            changed = True
        if name:
            # Only re-upload when the local file differs from the stored copy.
            if cloud_obj.is_object_stale(container, name, filename):
                cloud_obj.create_object(container, name, filename)
                changed = True
    else:
        if container_obj:
            if name:
                # Delete just the object, if it exists.
                if cloud_obj.get_object_metadata(container, name):
                    cloud_obj.delete_object(container, name)
                    changed = True
            else:
                # No object named: remove the whole container.
                cloud_obj.delete_container(container)
                changed = True
    return changed
def main():
    # Build the argument spec from the shared OpenStack options; these
    # helpers are injected by the trailing star-imports from
    # ansible.module_utils (Ansible's module boilerplate mechanism).
    # NOTE(review): DOCUMENTATION/EXAMPLES use a "file" option, but the
    # spec declares "filename" -- confirm which name is intended.
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        container=dict(required=True),
        filename=dict(required=False, default=None),
        container_access=dict(default='private', choices=['private', 'public']),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    # shade is an optional dependency; fail with a clean message rather
    # than an ImportError traceback.
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)
        changed = process_object(cloud, **module.params)

        module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        # Surface cloud-side failures through Ansible's JSON protocol.
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
| gpl-3.0 |
hagifoo/gae-pomodoro | app/lib/oauth2client/contrib/gce.py | 39 | 5431 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google Compute Engine
Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
"""
import logging
import warnings
from six.moves import http_client
from oauth2client import client
from oauth2client.contrib import _metadata
logger = logging.getLogger(__name__)
_SCOPES_WARNING = """\
You have requested explicit scopes to be used with a GCE service account.
Using this argument will have no effect on the actual scopes for tokens
requested. These scopes are set at VM instance creation time and
can't be overridden in the request.
"""
class AppAssertionCredentials(client.AssertionCredentials):
    """Credentials object for Compute Engine Assertion Grants

    This object will allow a Compute Engine instance to identify itself to
    Google and other OAuth 2.0 servers that can verify assertions. It can be
    used for the purpose of accessing data stored under an account assigned to
    the Compute Engine instance itself.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens.

    Note that :attr:`service_account_email` and :attr:`scopes`
    will both return None until the credentials have been refreshed.
    To check whether credentials have previously been refreshed use
    :attr:`invalid`.
    """

    def __init__(self, email=None, *args, **kwargs):
        """Constructor for AppAssertionCredentials

        Args:
            email: an email that specifies the service account to use.
                   Only necessary if using custom service accounts
                   (see https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#createdefaultserviceaccount).
        """
        if 'scopes' in kwargs:
            # Scopes are fixed at VM creation time on GCE, so explicit
            # scopes are ignored (see _SCOPES_WARNING).
            warnings.warn(_SCOPES_WARNING)
            kwargs['scopes'] = None

        # Assertion type is no longer used, but still in the
        # parent class signature.
        super(AppAssertionCredentials, self).__init__(None, *args, **kwargs)

        self.service_account_email = email
        self.scopes = None
        # Marked invalid until the first refresh fills in email/scopes
        # from the metadata server.
        self.invalid = True

    @classmethod
    def from_json(cls, json_data):
        # GCE credentials are bound to the VM; (de)serialization is
        # intentionally unsupported.
        raise NotImplementedError(
            'Cannot serialize credentials for GCE service accounts.')

    def to_json(self):
        raise NotImplementedError(
            'Cannot serialize credentials for GCE service accounts.')

    def retrieve_scopes(self, http):
        """Retrieves the canonical list of scopes for this access token.

        Overrides client.Credentials.retrieve_scopes. Fetches scopes info
        from the metadata server.

        Args:
            http: httplib2.Http, an http object to be used to make the refresh
                  request.

        Returns:
            A set of strings containing the canonical list of scopes.
        """
        self._retrieve_info(http)
        return self.scopes

    def _retrieve_info(self, http):
        """Retrieves service account info for invalid credentials.

        Args:
            http: an object to be used to make HTTP requests.
        """
        # Only hit the metadata server once; afterwards the cached
        # email/scopes are reused until the credentials are invalidated.
        if self.invalid:
            info = _metadata.get_service_account_info(
                http,
                service_account=self.service_account_email or 'default')
            self.invalid = False
            self.service_account_email = info['email']
            self.scopes = info['scopes']

    def _refresh(self, http):
        """Refreshes the access token.

        Skip all the storage hoops and just refresh using the API.

        Args:
            http: an object to be used to make HTTP requests.

        Raises:
            HttpAccessTokenRefreshError: When the refresh fails.
        """
        try:
            self._retrieve_info(http)
            self.access_token, self.token_expiry = _metadata.get_token(
                http, service_account=self.service_account_email)
        except http_client.HTTPException as err:
            # Translate the transport error into the oauth2client exception
            # callers expect.
            raise client.HttpAccessTokenRefreshError(str(err))

    @property
    def serialization_data(self):
        raise NotImplementedError(
            'Cannot serialize credentials for GCE service accounts.')

    def create_scoped_required(self):
        # Scopes are immutable on GCE, so scoping is never required.
        return False

    def sign_blob(self, blob):
        """Cryptographically sign a blob (of bytes).

        This method is provided to support a common interface, but
        the actual key used for a Google Compute Engine service account
        is not available, so it can't be used to sign content.

        Args:
            blob: bytes, Message to be signed.

        Raises:
            NotImplementedError, always.
        """
        raise NotImplementedError(
            'Compute Engine service accounts cannot sign blobs')
| mit |
annttu/sikteeri | membership/templatetags/sorturl.py | 2 | 3619 |
from django import template
from django.http import QueryDict
register = template.Library()
# Per-column sort cycles for the list views: each key names a sortable
# column and maps to the ordered list of order_by() expressions that
# repeated clicks on that column cycle through.
sort_cycles = {
    'id': ['id', '-id'],
    'status': ['status', '-status'],
    'bill_name': ['membership__person__first_name',
                  'membership__person__last_name',
                  '-membership__person__first_name',
                  '-membership__person__last_name'],
    'name': ['person__first_name', 'person__last_name', '-person__first_name', '-person__last_name'],
    'payer_name': ['payer_name', '-payer_name'],
    'amount': ['amount', '-amount'],
    'reference_number': ['reference_number', '-reference_number'],
    'billingcycle': ['billingcycle', '-billingcycle'],
    'comment': ['comment', '-comment'],
    'cycle': ['start', '-start', 'end', '-end'],
    'sum': ['sum', '-sum'],
    'due_date': ['bill__due_date', '-bill__due_date']
}


def lookup_sort(sort):
    """Return the order_by expression for a "key:index" sort string.

    The 1-based index selects an entry from ``sort_cycles[key]``.
    Returns None for unknown keys, non-numeric or out-of-range indices
    (including indices below 1), or when *sort* is None.

    >>> lookup_sort("id:1")
    'id'
    >>> lookup_sort(None)
    >>> lookup_sort("id:2")
    '-id'
    >>> lookup_sort("foo:1")
    >>> lookup_sort("id:3")
    >>> lookup_sort("id:0")
    >>> lookup_sort("id:a")
    """
    if sort is None:
        return None
    assert isinstance(sort, str)
    key, __, index = sort.partition(':')
    try:
        i = int(index)
        # Reject i < 1 explicitly: a negative Python list index would
        # otherwise silently wrap to the end of the cycle (e.g. "id:0"
        # previously returned '-id').
        if i < 1:
            return None
        return sort_cycles[key][i - 1]
    except (IndexError, KeyError, ValueError):
        return None
def next_sort(sort):
    """Return the next "key:index" value in the column's sort cycle.

    Indices are 1-based and wrap around; malformed or too-large indices
    restart the cycle at 1.  Unknown keys yield None.

    (Doctests updated for Python 3: the original expected ``u'...'``
    reprs, which never match under Python 3.)

    >>> next_sort("id:1")
    'id:2'
    >>> next_sort("id:2")
    'id:1'
    >>> next_sort("id:3")
    'id:1'
    >>> next_sort("id:a")
    'id:1'
    >>> next_sort("asdf:xyz")
    """
    if sort is None:
        return None
    assert isinstance(sort, str)
    key, __, index = sort.partition(':')
    try:
        available_sorts = sort_cycles[key]
        max_index = len(available_sorts)
        try:
            cur_index = int(index)
            if cur_index > max_index:
                # Out of range: restart the cycle.
                next_index = 1
            else:
                # Advance one step, wrapping max_index -> 1.
                next_index = 1 + cur_index % max_index
        except ValueError:
            # Non-numeric index: restart the cycle.
            next_index = 1
        return "{key}:{index}".format(key=key, index=next_index)
    except (IndexError, KeyError):
        return None
class SortUrl(template.Node):
    """Template node rendering the querystring that sorts by a column.

    Clicking the same column repeatedly advances through its sort cycle;
    clicking a different column starts that column's cycle at 1.
    """

    def __init__(self, field):
        # Strip quotes so both {% sorturl id %} and {% sorturl "id" %} work.
        self.field = field.replace('"', '').replace("'", '').strip()
        if self.field not in sort_cycles:
            raise ValueError("Unknown sort key")

    def render(self, context):
        """Return querystring part of URI

        >>> s = SortUrl('id')
        >>> s.render({'querystring': {}})
        '?sort=id:1'
        >>> s.render({'querystring': {'sort':'id:1'}})
        '?sort=id:2'
        >>> s.render({'querystring': {'sort':'name:1'}})
        '?sort=id:1'
        >>> s = SortUrl('asdf')
        Traceback (most recent call last):
        ValueError: Unknown sort key
        """
        querystring = context.get('querystring', {})
        if isinstance(querystring, QueryDict):
            # QueryDict is immutable; copy to a plain dict we can update.
            querystring = querystring.dict()
        sort = querystring.get('sort', "{key}:None".format(key=self.field))
        key, __, __ = sort.partition(':')
        next_sort_value = next_sort(sort)
        if key == self.field and next_sort_value is not None:
            # Same column clicked again: advance its cycle.
            querystring['sort'] = next_sort_value
        else:
            # Different column (or unusable sort value): restart at step 1.
            querystring['sort'] = "{key}:1".format(key=self.field)
        return "?" + "&".join(["=".join(k) for k in list(querystring.items())])
def do_sorturl(parser, token):
    """Template tag: ``{% sorturl 'field' %}`` renders the sort querystring.

    Compiles the tag token into a SortUrl node for the named column.
    """
    # The tag name itself is unused; only the field argument matters.
    __, field = token.split_contents()
    return SortUrl(field)

register.tag('sorturl', do_sorturl)
| mit |
cityofsomerville/cornerwise | server/shared/migrations/0002_staffnotification.py | 3 | 1092 | from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the StaffNotification model.

    StaffNotification records a notification blast sent by staff:
    its title/message content, the targeting data used (addresses,
    proposals, radius, region), how many subscribers it reached, and
    which user sent it.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shared', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='StaffNotification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('addresses', models.TextField(blank=True, default='')),
                ('proposals', models.TextField(blank=True, default='')),
                ('radius', models.FloatField()),
                ('message', models.TextField()),
                ('subscribers', models.IntegerField()),
                ('region', models.CharField(max_length=64)),
                # SET_NULL keeps the notification record even if the
                # sending user account is deleted.
                ('sender', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| mit |
KunihikoKido/aws-lambda-es-dumpdata | fabfile.py | 3 | 1719 | # -*- coding: utf-8 -*-
import os
from fabric.api import local
from fabric.api import task
from fabric_aws_lambda import SetupTask
from fabric_aws_lambda import InvokeTask
from fabric_aws_lambda import MakeZipTask
from fabric_aws_lambda import AWSLambdaInvokeTask
from fabric_aws_lambda import AWSLambdaGetConfigTask
from fabric_aws_lambda import AWSLambdaUpdateCodeTask
BASE_PATH = os.path.dirname(__file__)
LIB_PATH = os.path.join(BASE_PATH, 'lib')
INSTALL_PREFIX = os.path.join(BASE_PATH, 'local')
REQUIREMENTS_TXT = os.path.join(BASE_PATH, 'requirements.txt')
LAMBDA_FUNCTION_NAME = os.path.basename(BASE_PATH)
LAMBDA_HANDLER = 'lambda_handler'
LAMBDA_FILE = os.path.join(BASE_PATH, 'lambda_function.py')
EVENT_FILE = os.path.join(BASE_PATH, 'event.json')
ZIP_FILE = os.path.join(BASE_PATH, 'lambda_function.zip')
ZIP_EXCLUDE_FILE = os.path.join(BASE_PATH, 'exclude.lst')
@task
def clean():
    """Remove all build artifacts: the zip, lib/ and local/ trees."""
    for path in (ZIP_FILE, LIB_PATH, INSTALL_PREFIX):
        local('rm -rf {}'.format(path))
# Install dependencies from requirements.txt into lib/ (build staging).
task_setup = SetupTask(
    requirements=REQUIREMENTS_TXT,
    lib_path=LIB_PATH,
    install_prefix=INSTALL_PREFIX
)

# Run the Lambda handler locally against event.json.
task_invoke = InvokeTask(
    lambda_file=LAMBDA_FILE,
    lambda_handler=LAMBDA_HANDLER,
    event_file=EVENT_FILE,
    lib_path=LIB_PATH
)

# Bundle the function and its dependencies into lambda_function.zip.
task_makezip = MakeZipTask(
    zip_file=ZIP_FILE,
    exclude_file=ZIP_EXCLUDE_FILE,
    lib_path=LIB_PATH
)

# Invoke the deployed function on AWS with the sample event payload.
task_aws_invoke = AWSLambdaInvokeTask(
    function_name=LAMBDA_FUNCTION_NAME,
    payload='file://{}'.format(EVENT_FILE)
)

# Fetch the deployed function's configuration from AWS.
task_aws_getconfig = AWSLambdaGetConfigTask(
    function_name=LAMBDA_FUNCTION_NAME,
)

# Upload the freshly built zip as the new function code.
task_aws_updatecode = AWSLambdaUpdateCodeTask(
    function_name=LAMBDA_FUNCTION_NAME,
    zip_file='fileb://{}'.format(ZIP_FILE)
)
| mit |
cindym/hamster-applet | src/hamster/lib/i18n.py | 4 | 1244 | # - coding: utf-8 -
import os
import locale, gettext
def setup_i18n():
    """Initialise gettext translations for hamster-applet.

    Translations are only bound when running from an installed location
    (where the generated ``defs`` module exists); when running from
    source there is no usable locale directory layout, so a null
    translation is installed instead.
    """
    # determine location of po files
    try:
        from .. import defs
    # Narrowed from a bare ``except:``, which also swallowed
    # KeyboardInterrupt/SystemExit.  ValueError covers "attempted
    # relative import" failures on Python 2.
    except (ImportError, ValueError):
        defs = None

    # to avoid confusion, we won't translate unless running installed
    # reason for that is that bindtextdomain is expecting
    # localedir/language/LC_MESSAGES/domain.mo format, but we have
    # localedir/language.mo at it's best (after build)
    # and there does not seem to be any way to run straight from sources
    if defs:
        locale_dir = os.path.realpath(os.path.join(defs.DATA_DIR, "locale"))
        for module in (locale, gettext):
            module.bindtextdomain('hamster-applet', locale_dir)
            module.textdomain('hamster-applet')
            module.bind_textdomain_codeset('hamster-applet', 'utf8')
        gettext.install("hamster-applet", locale_dir, unicode=True)
    else:
        gettext.install("hamster-applet-uninstalled")
def C_(ctx, s):
    """Provide qualified translatable strings via context.

    Taken from gnome-games.
    """
    qualified = '%s\x04%s' % (ctx, s)
    translated = gettext.gettext(qualified)
    # If the context separator survived, no translation was found:
    # fall back to the untranslated input string.
    return s if '\x04' in translated else translated
| gpl-3.0 |
nixingyang/Kaggle-Competitions | TalkingData AdTracking Fraud Detection/perform_ensembling.py | 1 | 2489 | import os
import glob
import shutil
import datetime
import numpy as np
import pandas as pd
# Dataset
PROJECT_NAME = "TalkingData AdTracking Fraud Detection"
PROJECT_FOLDER_PATH = os.path.join(os.path.expanduser("~"), "Documents/Dataset",
PROJECT_NAME)
# Submission
TEAM_NAME = "Aurora"
SUBMISSION_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "submission")
os.makedirs(SUBMISSION_FOLDER_PATH, exist_ok=True)
# Ensembling
WORKSPACE_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "script/Mar_25_3")
KEYWORD = "DL"
def create_zip_archive(file_path):
    """Generate a zip archive next to *file_path* containing just that file.

    Returns the path of the created archive (the return value of
    ``shutil.make_archive``).
    """
    # splitext strips only the final extension of the file name itself,
    # unlike the previous rindex("."), which could match a dot inside a
    # parent directory name.  Also: a def, not a lambda assignment (PEP 8).
    base_name = os.path.splitext(file_path)[0]
    root_dir = os.path.abspath(os.path.join(file_path, ".."))
    return shutil.make_archive(base_name, "zip", root_dir,
                               os.path.basename(file_path))
def run():
    """Rank-average the submissions under WORKSPACE_FOLDER_PATH matching
    KEYWORD and write a normalised ensemble submission (plus a zip)."""
    print("Searching for submissions with keyword {} at {} ...".format(
        KEYWORD, WORKSPACE_FOLDER_PATH))
    submission_file_path_list = sorted(
        glob.glob(os.path.join(WORKSPACE_FOLDER_PATH, "*{}*".format(KEYWORD))))
    assert len(submission_file_path_list) != 0
    ranking_array_list = []
    for submission_file_path in submission_file_path_list:
        print("Loading {} ...".format(submission_file_path))
        submission_df = pd.read_csv(submission_file_path)
        print("Ranking the entries ...")
        # Convert scores to ranks: the entry with the i-th smallest score
        # receives rank i.  Rank averaging is robust to differently
        # calibrated model outputs.
        index_series = submission_df["is_attributed"].argsort()
        ranking_array = np.zeros(index_series.shape, dtype=np.uint32)
        ranking_array[index_series] = np.arange(len(index_series))
        ranking_array_list.append(ranking_array)
    # Reuses the last loaded submission as a template (same row order/ids
    # are assumed across all submissions).
    ensemble_df = submission_df.copy()
    ensemble_prediction_array = np.mean(ranking_array_list, axis=0)
    # Min-max scale the averaged ranks into [0, 1] scores.
    apply_normalization = lambda data_array: 1.0 * (data_array - np.min(
        data_array)) / (np.max(data_array) - np.min(data_array))
    ensemble_df["is_attributed"] = apply_normalization(
        ensemble_prediction_array)
    ensemble_file_path = os.path.join(
        SUBMISSION_FOLDER_PATH, "{} {} {}.csv".format(
            TEAM_NAME, KEYWORD,
            str(datetime.datetime.now()).split(".")[0]).replace(" ", "_"))
    print("Saving submission to {} ...".format(ensemble_file_path))
    ensemble_df.to_csv(ensemble_file_path, float_format="%.6f", index=False)
    compressed_ensemble_file_path = create_zip_archive(ensemble_file_path)
    print("Saving compressed submission to {} ...".format(
        compressed_ensemble_file_path))
    print("All done!")
if __name__ == "__main__":
run()
| mit |
leilihh/nova | nova/virt/baremetal/db/sqlalchemy/session.py | 17 | 2071 | # Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend."""
from oslo.config import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova import paths
opts = [
cfg.StrOpt('sql_connection',
default=('sqlite:///' +
paths.state_path_def('baremetal_nova.sqlite')),
help='The SQLAlchemy connection string used to connect to the '
'bare-metal database'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
# Module-level singleton EngineFacade, created on first use.
_FACADE = None


def _create_facade_lazily():
    # Build the facade lazily so the baremetal DB connection is only
    # configured when actually needed.
    # NOTE(review): not guarded by a lock; assumes the first call happens
    # before concurrent access -- confirm callers serialise initialisation.
    # NOTE(review): dict(CONF.database.iteritems()) is Python 2 only.
    global _FACADE
    if _FACADE is None:
        _FACADE = db_session.EngineFacade(CONF.baremetal.sql_connection,
                                          **dict(CONF.database.iteritems()))
    return _FACADE
def get_session(autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy session from the lazily-created facade."""
    return _create_facade_lazily().get_session(
        autocommit=autocommit, expire_on_commit=expire_on_commit)
def get_engine():
    """Return the SQLAlchemy engine backing the baremetal database."""
    return _create_facade_lazily().get_engine()
| apache-2.0 |
SimtterCom/gyp | test/configurations/inheritance/gyptest-inheritance.py | 72 | 1338 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
test = TestGyp.TestGyp()

if test.format == 'android':
    # This test currently fails on android. Investigate why, fix the issues
    # responsible, and reenable this test on android. See bug:
    # https://code.google.com/p/gyp/issues/detail?id=436
    test.skip_test(message='Test fails on android. Fix and reenable.\n')

test.run_gyp('configurations.gyp')

# Release build: output shows the inherited Base/Common/Common2 sections
# followed by the Release-specific line.
test.set_configuration('Release')
test.build('configurations.gyp')
test.run_built_executable('configurations',
                          stdout=('Base configuration\n'
                                  'Common configuration\n'
                                  'Common2 configuration\n'
                                  'Release configuration\n'))

# Debug build: same inherited sections, Debug-specific last line.
test.set_configuration('Debug')
test.build('configurations.gyp')
test.run_built_executable('configurations',
                          stdout=('Base configuration\n'
                                  'Common configuration\n'
                                  'Common2 configuration\n'
                                  'Debug configuration\n'))

test.pass_test()
| bsd-3-clause |
flennerhag/mlens | mlens/externals/sklearn/validation.py | 1 | 27114 | """
Scikit-learn utilities for input validation.
"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from .. import six
from ...utils.exceptions import NotFittedError, NonBLASDotWarning, \
DataConversionWarning
try:
from inspect import signature
except ImportError:
from mlens.externals.funcsigs import signature
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.

    Parameters
    ----------
    X : array or sparse matrix
    """
    # For sparse input only the explicitly stored entries need checking.
    data = X.data if sp.issparse(X) else X
    _assert_all_finite(data)
def as_float_array(X, copy=True, force_all_finite=True):
    """Converts an array-like to an array of floats.

    The new dtype will be np.float32 or np.float64, depending on the original
    type. The function can create a copy or modify the argument depending
    on the argument copy.

    Parameters
    ----------
    X : {array-like, sparse matrix}

    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
                                    and not sp.issparse(X)):
        # Not a plain ndarray/sparse matrix (or an np.matrix): funnel
        # through check_array, which handles conversion and validation.
        return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
                           copy=copy, force_all_finite=force_all_finite,
                           ensure_2d=False)
    elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
        return X.copy() if copy else X
    elif X.dtype in [np.float32, np.float64]:  # is numpy array
        # Preserve the existing memory layout when copying.
        return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
    else:
        # Small ints/uints/bools fit losslessly in float32; anything
        # wider is promoted to float64.
        if X.dtype.kind in 'uib' and X.dtype.itemsize <= 4:
            return_dtype = np.float32
        else:
            return_dtype = np.float64
        return X.astype(return_dtype)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length.
    None entries are ignored.

    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    lengths = [_num_samples(X) for X in arrays if X is not None]
    if len(set(lengths)) > 1:
        raise ValueError("Found input variables with inconsistent numbers of"
                         " samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that
    everything can be indexed by converting sparse matrices to csr and
    converting non-iterable objects to arrays.

    Parameters
    ----------
    *iterables : lists, dataframes, arrays, sparse matrices
        List of objects to ensure sliceability.
    """
    result = []
    for X in iterables:
        if sp.issparse(X):
            # CSR supports row indexing, which cross-validation needs.
            result.append(X.tocsr())
        elif X is None or hasattr(X, "__getitem__") or hasattr(X, "iloc"):
            # Already indexable (or a None placeholder): pass through.
            result.append(X)
        else:
            result.append(np.array(X))
    check_consistent_length(*result)
    return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
                          force_all_finite):
    """Convert a sparse matrix to a given format.

    Checks the sparse format of spmatrix and converts if necessary.

    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.

    accept_sparse : string, boolean or list/tuple of strings
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
        not in the allowed format, it will be converted to the first listed
        format. True allows the input to be any format. False means
        that a sparse matrix input will raise an error.

    dtype : string, type or None
        Data type of result. If None, the dtype of the input is preserved.

    copy : boolean
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if dtype is None:
        dtype = spmatrix.dtype

    changed_format = False

    # Normalise a single format string to a one-element list.
    if isinstance(accept_sparse, six.string_types):
        accept_sparse = [accept_sparse]

    if accept_sparse is False:
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    elif isinstance(accept_sparse, (list, tuple)):
        if len(accept_sparse) == 0:
            raise ValueError("When providing 'accept_sparse' "
                             "as a tuple or list, it must contain at "
                             "least one string value.")
        # ensure correct sparse format
        if spmatrix.format not in accept_sparse:
            # create new with correct sparse
            spmatrix = spmatrix.asformat(accept_sparse[0])
            changed_format = True
    elif accept_sparse is not True:
        # any other type
        raise ValueError("Parameter 'accept_sparse' should be a string, "
                         "boolean or list of strings. You provided "
                         "'accept_sparse={}'.".format(accept_sparse))

    if dtype != spmatrix.dtype:
        # convert dtype
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # force copy (a format/dtype conversion above already copied)
        spmatrix = spmatrix.copy()

    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            # Formats like DIA/LIL don't expose a flat .data array of
            # stored values; finiteness cannot be checked cheaply.
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format)
        else:
            _assert_all_finite(spmatrix.data)
    return spmatrix
def check_array(array, accept_sparse=False, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.
    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
        .. deprecated:: 0.19
           Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
           ``accept_sparse=False`` instead.
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.
    ensure_2d : boolean (default=True)
        Whether to raise a value error if X is not 2d.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.
    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.
    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    # accept_sparse 'None' deprecation check
    if accept_sparse is None:
        warnings.warn(
            "Passing 'None' to parameter 'accept_sparse' in methods "
            "check_array and check_X_y is deprecated in version 0.19 "
            "and will be removed in 0.21. Use 'accept_sparse=False' "
            " instead.", DeprecationWarning)
        accept_sparse = False
    # store whether originally we wanted numeric dtype
    dtype_numeric = isinstance(dtype, six.string_types) and dtype == "numeric"
    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None
    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None
    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]
    # resolve the estimator name used in error/warning messages below
    if estimator is not None:
        if isinstance(estimator, six.string_types):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""
    if sp.issparse(array):
        # format/dtype/copy handling for sparse input is delegated
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        if ensure_2d:
            if array.ndim == 1:
                raise ValueError(
                    "Expected 2D array, got 1D array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array))
            array = np.atleast_2d(array)
            # To ensure that array flags are maintained
            array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. %s expected <= 2."
                             % (array.ndim, estimator_name))
        if force_all_finite:
            _assert_all_finite(array)
    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required%s."
                             % (n_samples, shape_repr, ensure_min_samples,
                                context))
    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required%s."
                             % (n_features, shape_repr, ensure_min_features,
                                context))
    # warn when the conversion above changed the input's dtype (memory copy)
    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = ("Data with input dtype %s was converted to %s%s."
               % (dtype_orig, array.dtype, context))
        warnings.warn(msg, DataConversionWarning)
    return array
def check_X_y(X, y, accept_sparse=False, dtype="numeric", order=None,
              copy=False, force_all_finite=True, ensure_2d=True,
              allow_nd=False, multi_output=False, ensure_min_samples=1,
              ensure_min_features=1, y_numeric=False,
              warn_on_dtype=False, estimator=None):
    """Input validation for standard estimators.
    Checks X and y for consistent length, enforces X 2d and y 1d.
    Standard input checks are only applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2d and sparse y. If the dtype of X is
    object, attempt converting to float, raising on failure.
    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.
    y : nd-array, list or sparse matrix
        Labels.
    accept_sparse : string, boolean or list of string (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.
        .. deprecated:: 0.19
           Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
           ``accept_sparse=False`` instead.
    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X. This parameter
        does not influence whether y can have np.inf or np.nan values.
    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    multi_output : boolean (default=False)
        Whether to allow 2-d y (array or sparse matrix). If false, y will be
        validated as a vector. y cannot have np.nan or np.inf values if
        multi_output=True.
    ensure_min_samples : int (default=1)
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.
    y_numeric : boolean (default=False)
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.
    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.
    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.
    Returns
    -------
    X_converted : object
        The converted and validated X.
    y_converted : object
        The converted and validated y.
    """
    X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
                    ensure_2d, allow_nd, ensure_min_samples,
                    ensure_min_features, warn_on_dtype, estimator)
    if multi_output:
        # 2d targets allowed; sparse y is normalized to csr, finiteness checked
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
                        dtype=None)
    else:
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    if y_numeric and y.dtype.kind == 'O':
        # object-dtype regression targets are coerced to float
        y = y.astype(np.float64)
    check_consistent_length(X, y)
    return X, y
def column_or_1d(y, warn=False):
    """Collapse a 1d array or a single-column 2d array into a 1d array.
    Any other input shape raises a ValueError.
    Parameters
    ----------
    y : array-like
    warn : boolean, default False
       To control display of warnings.
    Returns
    -------
    y : array
    """
    dims = np.shape(y)
    n_dims = len(dims)
    if n_dims == 1:
        return np.ravel(y)
    if n_dims == 2 and dims[1] == 1:
        # a column vector is accepted, optionally with a conversion warning
        if warn:
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples, ), for example using ravel().",
                          DataConversionWarning, stacklevel=2)
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(dims))
def check_random_state(seed):
    """Coerce *seed* into a ``np.random.RandomState`` instance.
    Parameters
    ----------
    seed : None | int | instance of RandomState
        ``None`` (or the ``np.random`` module itself) yields the global
        RandomState singleton used by np.random; an integer seeds a fresh
        RandomState; an existing RandomState instance is returned as-is.
        Any other value raises ValueError.
    """
    if seed is np.random or seed is None:
        # the module-level singleton backing np.random.* functions
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Check whether the estimator's ``fit`` method accepts *parameter*.
    Parameters
    ----------
    estimator : object
        An estimator to inspect.
    parameter: str
        The searched parameter.
    Returns
    -------
    is_parameter: bool
        True when *parameter* is a named parameter of the estimator's
        ``fit`` method, False otherwise.
    """
    fit_parameters = signature(estimator.fit).parameters
    return parameter in fit_parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Validate that *array* is 2D, square and symmetric.
    A non-symmetric input is replaced by the average of the array and its
    transpose; depending on the flags, a warning or an exception is raised
    instead of silently symmetrizing.
    Parameters
    ----------
    array : nd-array or sparse matrix
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.
    tol : float
        Absolute tolerance for equivalence of arrays. Default = 1E-10.
    raise_warning : boolean (default=True)
        If True then raise a warning if conversion is required.
    raise_exception : boolean (default=False)
        If True then raise an exception if array is not symmetric.
    Returns
    -------
    array_sym : ndarray or sparse matrix
        Symmetrized version of the input array, i.e. the average of array
        and array.transpose(). If sparse, then duplicate entries are first
        summed and zeros are eliminated.
    """
    if array.ndim != 2 or array.shape[0] != array.shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(array.shape))
    if sp.issparse(array):
        diff = array - array.T
        # only the csr, csc and coo formats expose a `data` attribute
        if diff.format not in ('csr', 'csc', 'coo'):
            diff = diff.tocsr()
        is_symmetric = np.all(np.abs(diff.data) < tol)
    else:
        is_symmetric = np.allclose(array, array.T, atol=tol)
    if not is_symmetric:
        if raise_exception:
            raise ValueError("Array must be symmetric")
        if raise_warning:
            warnings.warn("Array is not symmetric, and will be converted "
                          "to symmetric by average with its transpose.")
        averaged = 0.5 * (array + array.T)
        if sp.issparse(array):
            # convert back to the input's sparse format
            array = getattr(averaged, 'to' + array.format)()
        else:
            array = averaged
    return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.
    Verifies that "all_or_any" of the given attributes are present on the
    estimator and raises a NotFittedError with the given message otherwise.
    Parameters
    ----------
    estimator : estimator instance.
        estimator instance for which the check is performed.
    attributes : attribute name(s) given as string or a list/tuple of strings
        Eg.:
            ``["coef_", "estimator_", ...], "coef_"``
    msg : string
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this method."
        For custom messages if "%(name)s" is present in the message string,
        it is substituted for the estimator name.
        Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
    all_or_any : callable, {all, any}, default all
        Specify whether all or any of the given attributes must exist.
    Returns
    -------
    None
    Raises
    ------
    NotFittedError
        If the attributes are not found.
    """
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this method.")
    # anything without a fit method cannot be an estimator at all
    if not hasattr(estimator, 'fit'):
        raise TypeError("%s is not an estimator instance." % (estimator))
    if not isinstance(attributes, (list, tuple)):
        attributes = [attributes]
    found = [hasattr(estimator, attr) for attr in attributes]
    if not all_or_any(found):
        raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
    """Raise ValueError if *X* contains any negative value.
    Parameters
    ----------
    X : array-like or sparse matrix
        Input data.
    whom : string
        Who passed X to this function (used in the error message).
    """
    # for sparse input only the stored entries need checking
    values = X.data if sp.issparse(X) else X
    if (values < 0).any():
        raise ValueError("Negative values in data passed to %s" % whom)
| mit |
gregelin/python-ideascaleapi | setup.py | 1 | 1086 | from distutils.core import setup
from ideascaleapi import __version__,__license__,__doc__
license_text = open('LICENSE').read()
long_description = open('README.rst').read()
setup(name="python-ideascaleapi",
version=__version__,
py_modules=["ideascaleapi"],
description="Libraries for interacting with the Ideascale API",
author="Greg Elin (forking James Turk)",
author_email = "greg@fotonotes.net",
license=license_text,
url="http://github.com/gregelin/python-ideascaleapi/tree/master",
long_description=long_description,
platforms=["any"],
classifiers=["Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=["simplejson >= 1.8"]
)
| bsd-3-clause |
frerepoulet/ZeroNet | src/lib/pyasn1/type/namedtype.py | 4 | 14243 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.type import tagmap
from pyasn1 import error
__all__ = ['NamedType', 'OptionalNamedType', 'DefaultedNamedType', 'NamedTypes']
class NamedType(object):
    """Create named field object for a constructed ASN.1 type.
    A |NamedType| holds a single field name together with its ASN.1 type.
    Instances are immutable and duck-type a Python :class:`tuple` of
    (*name*, *asn1Object*).
    Parameters
    ----------
    name: :py:class:`str`
        Field name
    asn1Object:
        ASN.1 type object
    """
    # flags overridden by the OPTIONAL/DEFAULT flavoured subclasses
    isOptional = False
    isDefaulted = False

    def __init__(self, name, asn1Object):
        self.__field_name = name
        self.__field_type = asn1Object
        # cached tuple that backs all comparison/containment protocols
        self.__as_tuple = name, asn1Object

    def __repr__(self):
        return '%s(%r, %r)' % (
            self.__class__.__name__, self.__field_name, self.__field_type)

    # Rich comparisons, hashing, indexing and iteration all delegate to the
    # underlying tuple, making a NamedType interchangeable with a
    # (name, asn1Object) pair.
    def __eq__(self, other):
        return self.__as_tuple == other

    def __ne__(self, other):
        return self.__as_tuple != other

    def __lt__(self, other):
        return self.__as_tuple < other

    def __le__(self, other):
        return self.__as_tuple <= other

    def __gt__(self, other):
        return self.__as_tuple > other

    def __ge__(self, other):
        return self.__as_tuple >= other

    def __hash__(self):
        return hash(self.__as_tuple)

    def __getitem__(self, idx):
        return self.__as_tuple[idx]

    def __iter__(self):
        return iter(self.__as_tuple)

    @property
    def name(self):
        return self.__field_name

    @property
    def asn1Object(self):
        return self.__field_type

    # Backward compatibility accessors
    def getName(self):
        return self.name

    def getType(self):
        return self.asn1Object
class OptionalNamedType(NamedType):
    __doc__ = NamedType.__doc__
    # Marks the field as OPTIONAL: it may be absent from a serialized
    # constructed object.
    isOptional = True
class DefaultedNamedType(NamedType):
    __doc__ = NamedType.__doc__
    # Marks the field as DEFAULT-ed: when absent, a default value applies.
    isDefaulted = True
class NamedTypes(object):
    """Create a collection of named fields for a constructed ASN.1 type.
    The NamedTypes object represents a collection of named fields of a constructed ASN.1 type.
    *NamedTypes* objects are immutable and duck-type Python :class:`dict` objects
    holding *name* as keys and ASN.1 type object as values.
    Parameters
    ----------
    *namedTypes: :class:`~pyasn1.type.namedtype.NamedType`
    """
    def __init__(self, *namedTypes):
        self.__namedTypes = namedTypes
        self.__namedTypesLen = len(self.__namedTypes)
        # lazily-built caches, populated on first access of the
        # corresponding properties below
        self.__minTagSet = None
        self.__tagToPosMapImpl = None
        self.__nameToPosMapImpl = None
        self.__ambigiousTypesImpl = None
        self.__tagMap = {}
        self.__hasOptionalOrDefault = None
        self.__requiredComponents = None
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__, ', '.join([repr(x) for x in self.__namedTypes])
        )
    def __eq__(self, other):
        return self.__namedTypes == other
    def __ne__(self, other):
        return self.__namedTypes != other
    def __lt__(self, other):
        return self.__namedTypes < other
    def __le__(self, other):
        return self.__namedTypes <= other
    def __gt__(self, other):
        return self.__namedTypes > other
    def __ge__(self, other):
        return self.__namedTypes >= other
    def __hash__(self):
        return hash(self.__namedTypes)
    def __getitem__(self, idx):
        # integer index -> positional lookup; string key -> lookup by name
        try:
            return self.__namedTypes[idx]
        except TypeError:
            return self.__namedTypes[self.__nameToPosMap[idx]]
    def __contains__(self, key):
        return key in self.__nameToPosMap
    def __iter__(self):
        return (x[0] for x in self.__namedTypes)
    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return self.__namedTypesLen > 0
    else:
        def __bool__(self):
            return self.__namedTypesLen > 0
    def __len__(self):
        return self.__namedTypesLen
    # Python dict protocol
    def values(self):
        return (namedType.asn1Object for namedType in self.__namedTypes)
    def keys(self):
        return (namedType.name for namedType in self.__namedTypes)
    def items(self):
        return ((namedType.name, namedType.asn1Object) for namedType in self.__namedTypes)
    def clone(self):
        return self.__class__(*self.__namedTypes)
    @property
    def __tagToPosMap(self):
        # maps each TagSet occurring in any field's tagMap to the position
        # of that field; built once, raises on ambiguous (duplicate) tags
        if self.__tagToPosMapImpl is None:
            self.__tagToPosMapImpl = {}
            for idx, namedType in enumerate(self.__namedTypes):
                tagMap = namedType.asn1Object.tagMap
                if not tagMap:
                    continue
                for _tagSet in tagMap.presentTypes:
                    if _tagSet in self.__tagToPosMapImpl:
                        raise error.PyAsn1Error('Duplicate type %s in %s' % (_tagSet, namedType))
                    self.__tagToPosMapImpl[_tagSet] = idx
        return self.__tagToPosMapImpl
    @property
    def __nameToPosMap(self):
        # maps field name -> position; built once, raises on duplicate names
        if self.__nameToPosMapImpl is None:
            self.__nameToPosMapImpl = {}
            for idx, namedType in enumerate(self.__namedTypes):
                if namedType.name in self.__nameToPosMapImpl:
                    raise error.PyAsn1Error('Duplicate name %s in %s' % (namedType.name, namedType))
                self.__nameToPosMapImpl[namedType.name] = idx
        return self.__nameToPosMapImpl
    @property
    def __ambigiousTypes(self):
        # for each position, the run of fields that could legally appear
        # there (the field itself plus any following optional/defaulted
        # fields); built by walking the fields in reverse
        if self.__ambigiousTypesImpl is None:
            self.__ambigiousTypesImpl = {}
            ambigiousTypes = ()
            for idx, namedType in reversed(tuple(enumerate(self.__namedTypes))):
                if namedType.isOptional or namedType.isDefaulted:
                    ambigiousTypes = (namedType,) + ambigiousTypes
                else:
                    ambigiousTypes = (namedType,)
                self.__ambigiousTypesImpl[idx] = NamedTypes(*ambigiousTypes)
        return self.__ambigiousTypesImpl
    def getTypeByPosition(self, idx):
        """Return ASN.1 type object by its position in fields set.
        Parameters
        ----------
        idx: :py:class:`int`
            Field index
        Returns
        -------
        :
            ASN.1 type
        Raises
        ------
        : :class:`~pyasn1.error.PyAsn1Error`
            If given position is out of fields range
        """
        try:
            return self.__namedTypes[idx].asn1Object
        except IndexError:
            raise error.PyAsn1Error('Type position out of range')
    def getPositionByType(self, tagSet):
        """Return field position by its ASN.1 type.
        Parameters
        ----------
        tagSet: :class:`~pyasn1.type.tag.TagSet`
            ASN.1 tag set distinguishing one ASN.1 type from others.
        Returns
        -------
        : :py:class:`int`
            ASN.1 type position in fields set
        Raises
        ------
        : :class:`~pyasn1.error.PyAsn1Error`
            If *tagSet* is not present or ASN.1 types are not unique within callee *NamedTypes*
        """
        try:
            return self.__tagToPosMap[tagSet]
        except KeyError:
            raise error.PyAsn1Error('Type %s not found' % (tagSet,))
    def getNameByPosition(self, idx):
        """Return field name by its position in fields set.
        Parameters
        ----------
        idx: :py:class:`int`
            Field index
        Returns
        -------
        : :py:class:`str`
            Field name
        Raises
        ------
        : :class:`~pyasn1.error.PyAsn1Error`
            If given position is out of fields range
        """
        try:
            return self.__namedTypes[idx].name
        except IndexError:
            raise error.PyAsn1Error('Type position out of range')
    def getPositionByName(self, name):
        """Return field position by field name.
        Parameters
        ----------
        name: :py:class:`str`
            Field name
        Returns
        -------
        : :py:class:`int`
            Field position in fields set
        Raises
        ------
        : :class:`~pyasn1.error.PyAsn1Error`
            If *name* is not present or not unique within callee *NamedTypes*
        """
        try:
            return self.__nameToPosMap[name]
        except KeyError:
            raise error.PyAsn1Error('Name %s not found' % (name,))
    def getTagMapNearPosition(self, idx):
        """Return ASN.1 types that are allowed at or past given field position.
        Some ASN.1 serialization allow for skipping optional and defaulted fields.
        Some constructed ASN.1 types allow reordering of the fields. When recovering
        such objects it may be important to know which types can possibly be
        present at any given position in the field sets.
        Parameters
        ----------
        idx: :py:class:`int`
            Field index
        Returns
        -------
        : :class:`~pyasn1.type.tagmap.TagMap`
            Map of ASN.1 types allowed at given field position
        Raises
        ------
        : :class:`~pyasn1.error.PyAsn1Error`
            If given position is out of fields range
        """
        try:
            return self.__ambigiousTypes[idx].getTagMap()
        except KeyError:
            raise error.PyAsn1Error('Type position out of range')
    def getPositionNearType(self, tagSet, idx):
        """Return the closest field position where given ASN.1 type is allowed.
        Some ASN.1 serialization allow for skipping optional and defaulted fields.
        Some constructed ASN.1 types allow reordering of the fields. When recovering
        such objects it may be important to know at which field position, in field set,
        given *tagSet* is allowed at or past *idx* position.
        Parameters
        ----------
        tagSet: :class:`~pyasn1.type.tag.TagSet`
            ASN.1 type which field position to look up
        idx: :py:class:`int`
            Field position at or past which to perform ASN.1 type look up
        Returns
        -------
        : :py:class:`int`
            Field position in fields set
        Raises
        ------
        : :class:`~pyasn1.error.PyAsn1Error`
            If *tagSet* is not present or not unique within callee *NamedTypes*
            or *idx* is out of fields range
        """
        try:
            return idx + self.__ambigiousTypes[idx].getPositionByType(tagSet)
        except KeyError:
            raise error.PyAsn1Error('Type position out of range')
    @property
    def minTagSet(self):
        """Return the minimal TagSet among ASN.1 type in callee *NamedTypes*.
        Some ASN.1 types/serialization protocols require ASN.1 types to be
        arranged based on their numerical tag value. The *minTagSet* property
        returns that.
        Returns
        -------
        : :class:`~pyasn1.type.tagset.TagSet`
            Minimal TagSet among ASN.1 types in callee *NamedTypes*
        """
        if self.__minTagSet is None:
            for namedType in self.__namedTypes:
                asn1Object = namedType.asn1Object
                try:
                    tagSet = asn1Object.getMinTagSet()
                except AttributeError:
                    tagSet = asn1Object.tagSet
                if self.__minTagSet is None or tagSet < self.__minTagSet:
                    self.__minTagSet = tagSet
        return self.__minTagSet
    def getTagMap(self, unique=False):
        """Create a *TagMap* object from tags and types recursively.
        Create a new :class:`~pyasn1.type.tagmap.TagMap` object by
        combining tags from *TagMap* objects of children types and
        associating them with their immediate child type.
        Example
        -------
        .. code-block:: python
           OuterType ::= CHOICE {
               innerType INTEGER
           }
        Calling *.getTagMap()* on *OuterType* will yield a map like this:
        .. code-block:: python
           Integer.tagSet -> Choice
        Parameters
        ----------
        unique: :py:class:`bool`
            If `True`, duplicate *TagSet* objects occurring while building
            new *TagMap* would cause error.
        Returns
        -------
        : :class:`~pyasn1.type.tagmap.TagMap`
            New *TagMap* holding *TagSet* object gathered from children types.
        """
        # results are cached per `unique` flag value
        if unique not in self.__tagMap:
            presentTypes = {}
            skipTypes = {}
            defaultType = None
            for namedType in self.__namedTypes:
                tagMap = namedType.asn1Object.tagMap
                for tagSet in tagMap:
                    if unique and tagSet in presentTypes:
                        raise error.PyAsn1Error('Non-unique tagSet %s' % (tagSet,))
                    presentTypes[tagSet] = namedType.asn1Object
                skipTypes.update(tagMap.skipTypes)
                if defaultType is None:
                    defaultType = tagMap.defaultType
                elif tagMap.defaultType is not None:
                    raise error.PyAsn1Error('Duplicate default ASN.1 type at %s' % (self,))
            self.__tagMap[unique] = tagmap.TagMap(presentTypes, skipTypes, defaultType)
        return self.__tagMap[unique]
    @property
    def hasOptionalOrDefault(self):
        # True when at least one field is OPTIONAL or DEFAULT-ed
        if self.__hasOptionalOrDefault is None:
            self.__hasOptionalOrDefault = bool([True for namedType in self.__namedTypes if namedType.isDefaulted or namedType.isOptional])
        return self.__hasOptionalOrDefault
    @property
    def namedTypes(self):
        return iter(self.__namedTypes)
    @property
    def requiredComponents(self):
        # frozenset of positions of mandatory (non-optional, non-defaulted)
        # fields
        if self.__requiredComponents is None:
            self.__requiredComponents = frozenset(
                [idx for idx, nt in enumerate(self.__namedTypes) if not nt.isOptional and not nt.isDefaulted]
            )
        return self.__requiredComponents
| gpl-2.0 |
anisyonk/pilot | DBReleaseHandler.py | 4 | 12301 | import os
from PilotErrors import PilotErrors
from pUtil import tolog, readpar
class DBReleaseHandler:
"""
Methods for handling the DBRelease file and possibly skip it in the input file list
In the presence of $[VO_ATLAS_SW_DIR|OSG_APP]/database, the pilot will use these methods to:
1. Extract the requested DBRelease version from the job parameters string, if present
2. Scan the $[VO_ATLAS_SW_DIR|OSG_APP]/database dir for available DBRelease files
3. If the requested DBRelease file is available, continue [else, abort at this point]
4. Create a DBRelease setup file containing necessary environment variables
5. Create a new DBRelease file only containing the setup file in the input file directory
6. Update the job state file
7. Remove the DBRelease file from the input file list if all previous steps finished correctly
"""
# private data members
__error = PilotErrors() # PilotErrors object
__version = ""
__DBReleaseDir = ""
__filename = "DBRelease-%s.tar.gz"
__setupFilename = "setup.py"
__workdir = ""
    def __init__(self, workdir=""):
        """ Default initialization """
        # getDBReleaseDir() is called for its side effect only: it verifies
        # the local DBRelease directory and caches it in self.__DBReleaseDir
        _path = self.getDBReleaseDir() # _path is a dummy variable
        self.__workdir = workdir
def removeDBRelease(self, inputFiles, inFilesGuids, realDatasetsIn, dispatchDblock, dispatchDBlockToken, prodDBlockToken):
""" remove the given DBRelease files from the input file list """
# will only remove the DBRelease files that are already available locally
# identify all DBRelease files in the list (mark all for removal)
# note: multi-trf jobs tend to have the same DBRelease file listed twice
position = 0
positions_list = []
for f in inputFiles:
if "DBRelease" in f:
positions_list.append(position)
tolog("Will remove file %s from input file list" % (f))
position += 1
# remove the corresponding guids, datasets and tokens
for position in positions_list:
try:
del(inputFiles[position])
except Exception, e:
tolog("!!WARNING!!1990!! Could not delete object %d in inFiles: %s" % (position, str(e)))
else:
tolog("Removed item %d in inFiles" % (position))
try:
del(inFilesGuids[position])
except Exception, e:
tolog("!!WARNING!!1990!! Could not delete object %d in inFilesGuids: %s" % (position, str(e)))
else:
tolog("Removed item %d in inFilesGuids" % (position))
try:
del(realDatasetsIn[position])
except Exception, e:
tolog("!!WARNING!!1990!! Could not delete object %d in realDatasetsIn: %s" % (position, str(e)))
else:
tolog("Removed item %d in realDatasetsIn" % (position))
try:
del(dispatchDblock[position])
except Exception, e:
tolog("!!WARNING!!1990!! Could not delete object %d in dispatchDblock: %s" % (position, str(e)))
else:
tolog("Removed item %d in dispatchDblock" % (position))
try:
del(dispatchDBlockToken[position])
except Exception, e:
tolog("!!WARNING!!1990!! Could not delete object %d in dispatchDBlockToken: %s" % (position, str(e)))
else:
tolog("Removed item %d in dispatchDBlockToken" % (position))
try:
del(prodDBlockToken[position])
except Exception, e:
tolog("!!WARNING!!1990!! Could not delete object %d in prodDBlockToken: %s" % (position, str(e)))
else:
tolog("Removed item %d in prodDBlockToken" % (position))
return inputFiles, inFilesGuids, realDatasetsIn, dispatchDblock, dispatchDBlockToken, prodDBlockToken
def extractVersion(self, name):
""" Try to extract the version from the string name """
version = ""
import re
re_v = re.compile('DBRelease-(\d+\.\d+\.\d+)\.tar\.gz')
v = re_v.search(name)
if v:
version = v.group(1)
else:
re_v = re.compile('DBRelease-(\d+\.\d+\.\d+\.\d+)\.tar\.gz')
v = re_v.search(name)
if v:
version = v.group(1)
return version
def getDBReleaseVersion(self, jobPars=""):
""" Get the DBRelease version from the job parameters string """
version = ""
# get the version from the job parameters
if jobPars != "":
version = self.extractVersion(jobPars)
else:
# get the version from private data member (already set earlier)
version = self.__version
return version
def getDBReleaseDir(self):
""" Return the proper DBRelease directory """
if os.environ.has_key('VO_ATLAS_SW_DIR'):
path = os.path.expandvars('$VO_ATLAS_SW_DIR/database/DBRelease')
else:
path = os.path.expandvars('$OSG_APP/database/DBRelease')
if path == "" or path.startswith('OSG_APP'):
tolog("Note: The DBRelease database directory is not available (will not attempt to skip DBRelease stage-in)")
else:
if os.path.exists(path):
tolog("Local DBRelease path verified: %s (will attempt to skip DBRelease stage-in)" % (path))
self.__DBReleaseDir = path
else:
tolog("Note: Local DBRelease path does not exist: %s (will not attempt to skip DBRelease stage-in)" % (path))
path = ""
return path
def isDBReleaseAvailable(self, versionFromJobPars):
    """ Check whether a given DBRelease file is already available.

    Returns True when the requested version is present in the local
    DBRelease directory (in which case stage-in can be skipped).
    Also caches the requested version in self.__version.
    """
    status = False
    self.__version = versionFromJobPars
    # do not proceed if the site explicitly requests DBRelease downloads
    # ('x' in os.environ replaces the deprecated os.environ.has_key(x))
    if 'ATLAS_DBREL_DWNLD' in os.environ:
        tolog("ATLAS_DBREL_DWNLD is set: do not skip DBRelease stage-in")
        return status
    # get the local path to the DBRelease directory
    path = self.getDBReleaseDir()
    if path != "" and os.path.exists(path):
        # get the list of available DBRelease directories
        dir_list = os.listdir(path)
        # is the required DBRelease version available?
        if dir_list != []:
            if self.__version != "":
                if self.__version in dir_list:
                    tolog("Found version %s in path %s (%d releases found)" % (self.__version, path, len(dir_list)))
                    status = True
                else:
                    tolog("Did not find version %s in path %s (%d releases found)" % (self.__version, path, len(dir_list)))
        else:
            tolog("Empty directory list: %s" % (path))
    return status
def createSetupFile(self, version, path):
""" Create the DBRelease setup file """
status = False
# get the DBRelease directory
d = self.__DBReleaseDir
if d != "" and version != "":
# create the python code string to be written to file
txt = "import os\n"
txt += "os.environ['DBRELEASE'] = '%s'\n" % (version)
txt += "os.environ['DATAPATH'] = '%s/%s:' + os.environ['DATAPATH']\n" % (d, version)
txt += "os.environ['DBRELEASE_REQUIRED'] = '%s'\n" % (version)
txt += "os.environ['DBRELEASE_REQUESTED'] = '%s'\n" % (version)
txt += "os.environ['CORAL_DBLOOKUP_PATH'] = '%s/%s/XMLConfig'\n" % (d, version)
try:
f = open(os.path.join(path, self.__setupFilename), "w")
except OSError, e:
tolog("!!WARNING!!1990!! Failed to create DBRelease %s: %s" % (self.__setupFilename, str(e)))
else:
f.write(txt)
f.close()
tolog("Created setup file with the following content:.................................\n%s" % (txt))
tolog("...............................................................................")
status = True
return status
def mkdirs(self, path, d):
""" Create directory d in path """
status = False
try:
_dir = os.path.join(path, d)
os.makedirs(_dir)
except OSError, e:
tolog("!!WARNING!!1990!! Failed to create directories %s: %s" % (_dir, str(e)))
else:
status = True
return status
def rmdirs(self, path):
""" Remove directory in path """
status = False
try:
from shutil import rmtree
rmtree(path)
except OSError, e:
tolog("!!WARNING!!1990!! Failed to remove directories %s: %s" % (path, str(e)))
else:
status = True
return status
def createDBRelease(self, version, path):
    """ Create the DBRelease file only containing a setup file.

    Builds <path>/DBRelease/<version>/<setup file>, packs it together with
    a DBRelease/current symlink into a gzipped tarball in path, then
    removes the scratch directories again. Returns True only when the
    tarball was fully written.
    """
    status = False
    # create the DBRelease and version directories
    DBRelease_path = os.path.join(path, 'DBRelease')
    if self.mkdirs(DBRelease_path, version):
        # create the setup file in the DBRelease directory
        version_path = os.path.join(DBRelease_path, version)
        if self.createSetupFile(version, version_path):
            tolog("Created DBRelease %s in new directory %s" % (self.__setupFilename, version_path))
            # now create a new DBRelease tarball
            filename = os.path.join(path, self.__filename % (version))
            import tarfile
            tolog("Attempting to create %s" % (filename))
            try:
                tar = tarfile.open(filename, "w:gz")
            except Exception, e:
                tolog("!!WARNING!!1990!! Could not create DBRelease tar file: %s" % str(e))
            else:
                if tar:
                    # add the setup file to the tar file
                    tar.add("%s/DBRelease/%s/%s" % (path, version, self.__setupFilename))
                    # create the symbolic link DBRelease/current -> 12.2.1
                    try:
                        _link = os.path.join(path, "DBRelease/current")
                        os.symlink(version, _link)
                    except Exception, e:
                        # NOTE(review): if symlink creation fails, tar is
                        # left open and the partial tarball remains on disk
                        # -- TODO confirm whether that is intended
                        tolog("!!WARNING!!1990!! Failed to create symbolic link %s: %s" % (_link, str(e)))
                    else:
                        tolog("Created symbolic link %s" % (_link))
                        # add the symbolic link to the tar file
                        tar.add(_link)
                        # done with the tar archive
                        tar.close()
                        tolog("Created new DBRelease tar file: %s" % filename)
                        status = True
                else:
                    tolog("!!WARNING!!1990!! Failed to create DBRelease tar file")
            # clean up the scratch directories regardless of tar outcome
            if self.rmdirs(DBRelease_path):
                tolog("Cleaned up directories in path %s" % (DBRelease_path))
        else:
            tolog("Failed to create DBRelease %s" % (self.__setupFilename))
            if self.rmdirs(DBRelease_path):
                tolog("Cleaned up directories in path %s" % (DBRelease_path))
    return status
if __name__ == "__main__":
    # Manual self-test: extract the DBRelease version from a typical set of
    # job parameters and build a minimal DBRelease tarball in the cwd.
    h = DBReleaseHandler()
    jobPars="jobParametersInputEvgenFile=EVNT.162766._000363.pool.root.1 OutputHitsFile=6daee77d-1d29-4901-a04e-d532a5e1812f_1.HITS.pool.root MaxEvents=2 SkipEvents=0 RandomSeed=164832 GeometryVersion=ATLAS-GEO-16-00-00 PhysicsList=QGSP_BERT JobConfig=VertexFromCondDB.py,jobConfig.LooperKiller.py,CalHits.py,jobConfig.LucidOn.py DBRelease=DBRelease-12.2.1.tar.gz ConditionsTag=OFLCOND-SDR-BS7T-02 IgnoreConfigError=False AMITag=s932"
    version = h.getDBReleaseVersion(jobPars=jobPars)
    tolog("Version = %s" % (version))
    if h.createDBRelease(version, os.getcwd()):
        pass
| apache-2.0 |
woooooojoo/flask001 | lib/flask/signals.py | 783 | 2140 | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
# Try to use real blinker signals; otherwise install no-op stand-ins so the
# rest of flask can register/emit signals without caring whether blinker
# is installed.
signals_available = False
try:
    from blinker import Namespace
    signals_available = True
except ImportError:
    class Namespace(object):
        # Minimal stand-in for blinker.Namespace: hands out fake signals.
        def signal(self, name, doc=None):
            return _FakeSignal(name, doc)

    class _FakeSignal(object):
        """If blinker is unavailable, create a fake class with the same
        interface that allows sending of signals but will fail with an
        error on anything else.  Instead of doing anything on send, it
        will just ignore the arguments and do nothing instead.
        """

        def __init__(self, name, doc=None):
            self.name = name
            self.__doc__ = doc

        def _fail(self, *args, **kwargs):
            raise RuntimeError('signalling support is unavailable '
                               'because the blinker library is '
                               'not installed.')
        # sending is a silent no-op; everything else raises loudly
        send = lambda *a, **kw: None
        connect = disconnect = has_receivers_for = receivers_for = \
            temporarily_connected_to = connected_to = _fail
        del _fail
# the namespace for code signals.  If you are not flask code, do
# not put signals in here.  Create your own namespace instead.
_signals = Namespace()


# core signals.  For usage examples grep the sourcecode or consult
# the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
| apache-2.0 |
uwosh/COBAInternship | config.py | 1 | 1615 | # -*- coding: utf-8 -*-
#
# File: COBAInternship.py
#
# Copyright (c) 2008 by []
# Generator: ArchGenXML Version 2.1
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
__author__ = """Andrew Schultz and Josh Klotz"""
__docformat__ = 'plaintext'
# Product configuration.
#
# The contents of this module will be imported into __init__.py, the
# workflow configuration and every content type module.
#
# If you wish to perform custom configuration, you may put a file
# AppConfig.py in your product's root directory. The items in there
# will be included (by importing) in this file if found.
from Products.CMFCore.permissions import setDefaultRoles
##code-section config-head #fill in your manual code here
##/code-section config-head
PROJECTNAME = "COBAInternship"

# Permissions
DEFAULT_ADD_CONTENT_PERMISSION = "Add portal content"
setDefaultRoles(DEFAULT_ADD_CONTENT_PERMISSION, ('Manager', 'Owner'))

# per-type add permissions, keyed by portal type name
ADD_CONTENT_PERMISSIONS = {
    'COBAInternship': 'COBAInternship: Add COBAInternship',
}

setDefaultRoles('COBAInternship: Add COBAInternship', ('Manager','Owner'))

product_globals = globals()

# Dependencies of Products to be installed by quick-installer
# override in custom configuration
DEPENDENCIES = []

# Dependent products - not quick-installed - used in testcase
# override in custom configuration
PRODUCT_DEPENDENCIES = []

##code-section config-bottom #fill in your manual code here
##/code-section config-bottom

# Load custom configuration not managed by archgenxml
try:
    from Products.COBAInternship.AppConfig import *
except ImportError:
    pass
| gpl-2.0 |
marekjm/diaspy | diaspy/models.py | 1 | 23071 | #!/usr/bin/env python3
"""This module is only imported in other diaspy modules and
MUST NOT import anything.
"""
import json
import copy
import re
# Feature-detect BeautifulSoup; without it the HTML-parsing code paths in
# this module fall back on the (more fragile) regex scrapers.
BS4_SUPPORT=False
try:
    from bs4 import BeautifulSoup
except ImportError:
    print("[diaspy] BeautifulSoup not found, falling back on regex.")
else: BS4_SUPPORT=True
from diaspy import errors
class Aspect():
    """This class represents an aspect.

    Class can be initialized by passing either an id and/or name as
    parameters.
    If both are missing, an exception will be raised.
    """
    def __init__(self, connection, id, name=None):
        # connection: authenticated diaspy connection used for all requests
        self._connection = connection
        self.id, self.name = id, name
        # cache of the last contact list fetched by getUsers()
        self._cached = []

    def getUsers(self, fetch = True):
        """Returns list of GUIDs of users who are listed in this aspect.

        :param fetch: when False, return the cached result of the
            previous call instead of querying the pod again
        """
        # NOTE(review): despite the docstring this returns the raw JSON
        # list of contact objects from contacts.json, not bare GUIDs --
        # confirm against callers before changing either side
        if fetch:
            request = self._connection.get('contacts.json?a_id={}'.format(self.id))
            self._cached = request.json()
        return self._cached

    def removeAspect(self):
        """
        --> POST /aspects/{id} HTTP/1.1
        --> _method=delete&authenticity_token={token}

        <-- HTTP/1.1 302 Found

        Removes whole aspect.

        :returns: None
        :raises errors.AspectError: on any status code other than 302
        """
        request = self._connection.tokenFrom('contacts').delete('aspects/{}'.format(self.id))

        if request.status_code != 302:
            raise errors.AspectError('wrong status code: {0}'.format(request.status_code))

    def addUser(self, user_id):
        """Add user to current aspect.

        :param user_id: user to add to aspect
        :type user_id: int
        :returns: JSON from request

        --> POST /aspect_memberships HTTP/1.1
        --> Accept: application/json, text/javascript, */*; q=0.01
        --> Content-Type: application/json; charset=UTF-8

        --> {"aspect_id":123,"person_id":123}

        <-- HTTP/1.1 200 OK
        """
        data = {'aspect_id': self.id,
                'person_id': user_id}
        headers = {'content-type': 'application/json',
                   'accept': 'application/json'}

        request = self._connection.tokenFrom('contacts').post('aspect_memberships', data=json.dumps(data), headers=headers)

        # map the pod's error statuses onto descriptive exceptions
        if request.status_code == 400:
            raise errors.AspectError('duplicate record, user already exists in aspect: {0}'.format(request.status_code))
        elif request.status_code == 404:
            raise errors.AspectError('user not found from this pod: {0}'.format(request.status_code))
        elif request.status_code != 200:
            raise errors.AspectError('wrong status code: {0}'.format(request.status_code))

        response = None
        try:
            response = request.json()
        except json.decoder.JSONDecodeError:
            """ Should be OK now, but I'll leave this commentary here
            at first to see if anything comes up """
            # FIXME For some (?) reason removing users from aspects works, but
            # adding them is a no-go and Diaspora* kicks us out with CSRF errors.
            # Weird.
            pass

        if response is None:
            raise errors.CSRFProtectionKickedIn()

        # Now you should fetchguid(fetch_stream=False) on User to update aspect membership_id's
        # Or update it locally with the response
        return response

    def removeUser(self, user):
        """Remove user from current aspect.

        :param user: user to remove from aspect
        :type user: diaspy.people.User object
        :raises errors.UserIsNotMemberOfAspect: when the user has no
            membership record for this aspect
        """
        # find the membership record that ties this user to this aspect
        membership_id = None
        to_remove = None
        for each in user.aspectMemberships():
            if each.get('aspect', {}).get('id') == self.id:
                membership_id = each.get('id')
                to_remove = each
                break # no need to continue

        if membership_id is None:
            raise errors.UserIsNotMemberOfAspect(user, self)

        request = self._connection.delete('aspect_memberships/{0}'.format(membership_id))

        if request.status_code == 404:
            raise errors.AspectError('cannot remove user from aspect, probably tried too fast after adding: {0}'.format(request.status_code))
        elif request.status_code != 200:
            raise errors.AspectError('cannot remove user from aspect: {0}'.format(request.status_code))

        # keep the local membership cache consistent with the pod; the two
        # branches cover the two shapes of user data (User vs Contacts())
        if 'contact' in user.data: # User object
            if to_remove: user.data['contact']['aspect_memberships'].remove( to_remove ) # remove local aspect membership_id
        else: # User object from Contacts()
            if to_remove: user.data['aspect_memberships'].remove( to_remove ) # remove local aspect membership_id

        return request.json()
class Notification():
    """This class represents single notification.
    """
    # regex fallbacks used when BeautifulSoup is unavailable:
    # guid(s) of the user(s) who triggered the notification
    _who_regexp = re.compile(r'/people/([0-9a-f]+)["\']{1} class=["\']{1}hovercardable')
    # id of the post the notification refers to
    _aboutid_regexp = re.compile(r'/posts/[0-9a-f]+')
    # strips html tags from note_html
    _htmltag_regexp = re.compile('</?[a-z]+( *[a-z_-]+=["\'].*?["\'])* */?>')

    def __init__(self, connection, data):
        self._connection = connection
        # the payload lives under a key named after the notification type
        self.type = data['type']
        self._data = data[self.type]
        self.id = self._data['id']
        self.unread = self._data['unread']

    def __getitem__(self, key):
        """Returns a key from notification data.
        """
        return self._data[key]

    def __str__(self):
        """Returns notification note (the human-readable text, with all
        markup stripped).
        """
        if BS4_SUPPORT:
            soup = BeautifulSoup(self._data['note_html'], 'lxml')
            media_body = soup.find('div', {"class": "media-body"})
            div = media_body.find('div')
            if div: div.decompose()
            return media_body.getText().strip()
        else:
            string = re.sub(self._htmltag_regexp, '', self._data['note_html'])
            string = string.strip().split('\n')[0]
            # collapse runs of whitespace left behind by the tag stripping
            while '  ' in string: string = string.replace('  ', ' ')
            return string

    def __repr__(self):
        """Returns notification note with more details.
        """
        return '{0}: {1}'.format(self.when(), str(self))

    def about(self):
        """Returns id of post about which the notification is informing OR:
        If the id is None it means that it's about user so .who() is called.
        """
        if BS4_SUPPORT:
            soup = BeautifulSoup(self._data['note_html'], 'lxml')
            id = soup.find('a', {"data-ref": True})
            if id: return id['data-ref']
        about = self._aboutid_regexp.search(self._data['note_html'])
        # no post reference in the HTML -> the notification is about a user
        if about is None: about = self.who()[0]
        else: about = int(about.group(0)[7:])
        return about

    def who(self):
        """Returns list of guids of the users who caused you to get the notification.
        """
        if BS4_SUPPORT: # Parse the HTML with BS4
            soup = BeautifulSoup(self._data['note_html'], 'lxml')
            hovercardable_soup = soup.findAll('a', {"class": "hovercardable"})
            # '/people/' prefix (8 chars) is sliced off each href
            return list(set([soup['href'][8:] for soup in hovercardable_soup]))
        else:
            return list(set([who for who in self._who_regexp.findall(self._data['note_html'])]))

    def when(self):
        """Returns UTC time as found in note_html.
        """
        return self._data['created_at']

    def mark(self, unread=False):
        """Marks notification to read/unread.
        Marks notification to read if `unread` is False.
        Marks notification to unread if `unread` is True.

        :param unread: which state set for notification
        :type unread: bool
        """
        headers = {'x-csrf-token': repr(self._connection)}
        params = {'set_unread': json.dumps(unread)}
        self._connection.put('notifications/{0}'.format(self['id']), params=params, headers=headers)
        # mirror the new state locally so callers need not re-fetch
        self._data['unread'] = unread
class Conversation():
    """This class represents a conversation.

    .. note::
        Remember that you need to have access to the conversation.
    """
    if not BS4_SUPPORT:
        # regex scrapers used to pull message data out of the conversation
        # HTML page when BeautifulSoup is not installed
        _message_stream_regexp = re.compile(r'<div class=["\']{1}stream["\']{1}>(.*?)<div class=["\']{1}stream-element new-message["\']{1}>', re.DOTALL)
        _message_guid_regexp = re.compile(r'data-guid=["\']{1}([0-9]+)["\']{1}')
        _message_created_at_regexp = re.compile(r'<time datetime=["\']{1}([0-9]{4}-[0-9]{2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}Z)["\']{1}')
        _message_body_regexp = re.compile(r'<div class=["\']{1}message-content["\']{1}>\s+<p>(.*?)</p>\s+</div>', re.DOTALL)
        _message_author_guid_regexp = re.compile(r'<a href=["\']{1}/people/([a-f0-9]+)["\']{1} class=["\']{1}img')
        _message_author_name_regexp = re.compile(r'<img alt=["\']{1}(.*?)["\']{1}.*')
        _message_author_avatar_regexp = re.compile(r'src=["\']{1}(.*?)["\']{1}')

    def __init__(self, connection, id, fetch=True):
        """
        :param conv_id: id of the post and not the guid!
        :type conv_id: str
        :param connection: connection object used to authenticate
        :type connection: connection.Connection
        """
        self._connection = connection
        self.id = id
        self._data = {}
        self._messages = []
        if fetch: self._fetch()

    def __len__(self): return len(self._messages)
    def __iter__(self): return iter(self._messages)
    def __getitem__(self, n): return self._messages[n]

    def _fetch(self):
        """Fetches JSON data representing conversation.
        """
        request = self._connection.get('conversations/{}.json'.format(self.id))
        if request.status_code == 200:
            self._data = request.json()['conversation']
        else:
            raise errors.ConversationError('cannot download conversation data: {0}'.format(request.status_code))

    def _fetch_messages(self):
        """Fetches HTML data we will use to parse message data.
        This is a workaround until Diaspora* has it's API plans implemented.
        """
        request = self._connection.get('conversations/{}'.format(self.id))
        if request.status_code == 200:
            # Clear potential old messages
            self._messages = []
            # every parsed message starts from this shape; fields that
            # cannot be scraped stay None
            message_template = {
                'guid' : None,
                'created_at' : None,
                'body' : None,
                'author' : {
                    'guid' : None,
                    'diaspora_id' : None, # TODO? Not able to get from this page.
                    'name' : None,
                    'avatar' : None
                }
            }
            if BS4_SUPPORT: # Parse the HTML with BS4
                soup = BeautifulSoup(request.content, 'lxml')
                messages_soup = soup.findAll('div', {"class": "stream-element message"})
                for message_soup in messages_soup:
                    message = copy.deepcopy(message_template)

                    # guid
                    if message_soup and message_soup.has_attr('data-guid'):
                        message['guid'] = message_soup['data-guid']
                    # created_at
                    time_soup = message_soup.find('time', {"class": "timeago"})
                    if time_soup and time_soup.has_attr('datetime'):
                        message['created_at'] = time_soup['datetime']
                    # body
                    body_soup = message_soup.find('div', {"class": "message-content"})
                    if body_soup: message['body'] = body_soup.get_text().strip()
                    # author
                    author_a_soup = message_soup.find('a', {"class": "img"})
                    if author_a_soup:
                        # author guid ('/people/' prefix sliced off)
                        message['author']['guid'] = author_a_soup['href'][8:]
                        # name and avatar
                        author_img_soup = author_a_soup.find('img', {"class": "avatar"})
                        if author_img_soup:
                            message['author']['name'] = author_img_soup['title']
                            message['author']['avatar'] = author_img_soup['src']
                    self._messages.append(message.copy())
            else: # Regex fallback
                messages_stream_html = self._message_stream_regexp.search(request.content.decode('utf-8'))
                if messages_stream_html:
                    messages_html = messages_stream_html.group(1).split("<div class='stream-element message'")
                    for message_html in messages_html:
                        message = copy.deepcopy(message_template)

                        # Guid -- a fragment without one is not a message
                        guid = self._message_guid_regexp.search(message_html)
                        if guid: message['guid'] = guid.group(1)
                        else: continue
                        # Created at
                        created_at = self._message_created_at_regexp.search(message_html)
                        if created_at: message['created_at'] = created_at.group(1)
                        # Body
                        body = self._message_body_regexp.search(message_html)
                        if body: message['body'] = body.group(1)
                        # Author
                        author_guid = self._message_author_guid_regexp.search(message_html)
                        if author_guid: message['author']['guid'] = author_guid.group(1)
                        author_name = self._message_author_name_regexp.search(message_html)
                        if author_name:
                            message['author']['name'] = author_name.group(1)
                            author_avatar = self._message_author_avatar_regexp.search(author_name.group(0))
                            if author_avatar: message['author']['avatar'] = author_avatar.group(1)
                        self._messages.append(message.copy())
        else:
            raise errors.ConversationError('cannot download message data from conversation: {0}'.format(request.status_code))

    def messages(self): return self._messages

    def update_messages(self):
        """(Re-)fetches messages in this conversation.
        """
        self._fetch_messages()

    def answer(self, text):
        """Answer that conversation

        :param text: text to answer.
        :type text: str
        """
        data = {'message[text]': text,
                'utf8': '&#x2713;',
                'authenticity_token': repr(self._connection)}

        request = self._connection.post('conversations/{}/messages'.format(self.id),
                                        data=data,
                                        headers={'accept': 'application/json'})
        if request.status_code != 200:
            raise errors.ConversationError('{0}: Answer could not be posted.'
                                           .format(request.status_code))
        return request.json()

    def delete(self):
        """Delete this conversation.
        Has to be implemented.
        """
        data = {'authenticity_token': repr(self._connection)}

        request = self._connection.delete('conversations/{0}/visibility/'
                                          .format(self.id),
                                          data=data,
                                          headers={'accept': 'application/json'})

        # NOTE(review): success is signalled by *anything but* 404 here,
        # unlike the other methods in this class -- confirm intended
        if request.status_code != 404:
            raise errors.ConversationError('{0}: Conversation could not be deleted.'
                                           .format(request.status_code))

    def get_subject(self):
        """Returns the subject of this conversation
        """
        return self._data['subject']
class Comment():
    """A single comment attached to a post.

    Instances are thin read-only wrappers around the JSON dictionary
    returned by the pod and need no Connection() object. They are normally
    created for you by Comments() (which Post() creates automatically)
    rather than by hand.
    """
    def __init__(self, data):
        # keep the raw JSON and expose the two identifiers directly
        self._data = data
        self.id = data['id']
        self.guid = data['guid']

    def __str__(self):
        """Text body of the comment."""
        return self._data['text']

    def __repr__(self):
        """Comment text with its author, formatted as
        AUTHOR (AUTHOR'S GUID): COMMENT
        """
        who = self.author()
        who_guid = self.author('guid')
        return '{0} ({1}): {2}'.format(who, who_guid, str(self))

    def when(self):
        """Creation timestamp of the comment, as reported by the pod."""
        return self._data['created_at']

    def author(self, key='name'):
        """Return one field of the comment author's JSON (default: name)."""
        return self._data['author'][key]
class Comments():
    """Container for the Comment() objects belonging to one post.
    """
    def __init__(self, comments=None):
        # A mutable default (comments=[]) would be shared by every instance
        # created without arguments, leaking comments between posts through
        # add(); use None as the default and build a fresh list instead.
        self._comments = comments if comments is not None else []

    def __iter__(self):
        for comment in self._comments: yield comment

    def __len__(self):
        return len(self._comments)

    def __getitem__(self, index):
        # returns None (not IndexError) when the container is empty
        if self._comments: return self._comments[index]

    def __bool__(self):
        if self._comments: return True
        return False

    def ids(self):
        """Return the ids of all stored comments."""
        return [c.id for c in self._comments]

    def add(self, comment):
        """ Expects Comment() object

        :param comment: Comment() object to add.
        :type comment: Comment() object."""
        if comment and type(comment) == Comment: self._comments.append(comment)

    def set(self, comments):
        """Sets comments wich already have a Comment() obj

        :param comments: list with Comment() objects to set.
        :type comments: list.
        """
        if comments: self._comments = comments

    def set_json(self, json_comments):
        """Sets comments for this post from post data."""
        if json_comments:
            self._comments = [Comment(c) for c in json_comments]
class Post():
    """This class represents a post.

    .. note::
        Remember that you need to have access to the post.
    """
    def __init__(self, connection, id=0, guid='', fetch=True, comments=True, post_data=None):
        """
        :param id: id of the post (GUID is recommended)
        :type id: int
        :param guid: GUID of the post
        :type guid: str
        :param connection: connection object used to authenticate
        :type connection: connection.Connection
        :param fetch: defines whether to fetch post's data or not
        :type fetch: bool
        :param comments: defines whether to fetch post's comments or not (if True also data will be fetched)
        :type comments: bool
        :param post_data: contains post data so no need to fetch the post if this is set, until you want to update post data
        :type: json
        :raises TypeError: when neither guid nor id is given
        """
        if not (guid or id): raise TypeError('neither guid nor id was provided')
        self._connection = connection
        self.id = id
        self.guid = guid
        self._data = {}
        self.comments = Comments()
        if post_data:
            self._data = post_data

        if fetch: self._fetchdata()
        if comments:
            # comments need the numeric post id, so post data must exist first
            if not self._data: self._fetchdata()
            self._fetchcomments()
        else:
            # fall back on the comments embedded in the post JSON itself
            if not self._data: self._fetchdata()
            self.comments.set_json( self.data()['interactions']['comments'] )

    def __repr__(self):
        """Returns string containing more information then str().
        """
        return '{0} ({1}): {2}'.format(self._data['author']['name'], self._data['author']['guid'], self._data['text'])

    def __str__(self):
        """Returns text of a post.
        """
        return self._data['text']

    def _fetchdata(self):
        """This function retrieves data of the post.

        :returns: guid of post whose data was fetched
        """
        # prefer guid over id when both are set
        if self.id: id = self.id
        if self.guid: id = self.guid
        request = self._connection.get('posts/{0}.json'.format(id))
        if request.status_code != 200:
            raise errors.PostError('{0}: could not fetch data for post: {1}'.format(request.status_code, id))
        elif request:
            self._data = request.json()
        return self.data()['guid']

    def _fetchcomments(self):
        """Retreives comments for this post.
        Retrieving comments via GUID will result in 404 error.
        DIASPORA* does not supply comments through /posts/:guid/ endpoint.
        """
        id = self.data()['id']
        if self.data()['interactions']['comments_count']:
            request = self._connection.get('posts/{0}/comments.json'.format(id))
            if request.status_code != 200:
                raise errors.PostError('{0}: could not fetch comments for post: {1}'.format(request.status_code, id))
            else:
                self.comments.set([Comment(c) for c in request.json()])

    def fetch(self, comments = False):
        """Fetches post data.
        Returns self to allow call chaining.
        """
        self._fetchdata()
        if comments:
            self._fetchcomments()
        return self

    def data(self, data = None):
        # getter/setter for the raw post JSON
        if data is not None:
            self._data = data
        return self._data

    def like(self):
        """This function likes a post.
        It abstracts the 'Like' functionality.

        :returns: dict -- json formatted like object.
        """
        data = {'authenticity_token': repr(self._connection)}

        request = self._connection.post('posts/{0}/likes'.format(self.id),
                                        data=data,
                                        headers={'accept': 'application/json'})

        if request.status_code != 201:
            raise errors.PostError('{0}: Post could not be liked.'
                                   .format(request.status_code))

        likes_json = request.json()
        if likes_json:
            # keep the local interactions cache in sync (delete_like() reads it)
            self._data['interactions']['likes'] = [likes_json]
        return likes_json

    def reshare(self):
        """This function reshares a post
        """
        data = {'root_guid': self._data['guid'],
                'authenticity_token': repr(self._connection)}

        request = self._connection.post('reshares',
                                        data=data,
                                        headers={'accept': 'application/json'})
        if request.status_code != 201:
            raise Exception('{0}: Post could not be reshared'.format(request.status_code))
        return request.json()

    def comment(self, text):
        """This function comments on a post

        :param text: text to comment.
        :type text: str
        :returns: Comment() wrapping the newly created comment
        """
        data = {'text': text,
                'authenticity_token': repr(self._connection)}
        request = self._connection.post('posts/{0}/comments'.format(self.id),
                                        data=data,
                                        headers={'accept': 'application/json'})

        if request.status_code != 201:
            raise Exception('{0}: Comment could not be posted.'
                            .format(request.status_code))
        return Comment(request.json())

    def vote_poll(self, poll_answer_id):
        """This function votes on a post's poll

        :param poll_answer_id: id to poll vote.
        :type poll_answer_id: int
        """
        poll_id = self._data['poll']['poll_id']
        data = {'poll_answer_id': poll_answer_id,
                'poll_id': poll_id,
                'post_id': self.id,
                'authenticity_token': repr(self._connection)}
        request = self._connection.post('posts/{0}/poll_participations'.format(self.id),
                                        data=data,
                                        headers={'accept': 'application/json'})
        if request.status_code != 201:
            raise Exception('{0}: Vote on poll failed.'
                            .format(request.status_code))
        return request.json()

    def hide(self):
        """Hide this post from the stream.

        -> PUT /share_visibilities/42 HTTP/1.1
           post_id=123
        <- HTTP/1.1 200 OK
        """
        headers = {'x-csrf-token': repr(self._connection)}
        params = {'post_id': json.dumps(self.id)}
        request = self._connection.put('share_visibilities/42', params=params, headers=headers)
        if request.status_code != 200:
            raise Exception('{0}: Failed to hide post.'
                            .format(request.status_code))

    def mute(self):
        """Block the post's author.

        -> POST /blocks HTTP/1.1
           {"block":{"person_id":123}}
        <- HTTP/1.1 204 No Content
        """
        headers = {'content-type':'application/json', 'x-csrf-token': repr(self._connection)}
        data = json.dumps({ 'block': { 'person_id' : self._data['author']['id'] } })
        request = self._connection.post('blocks', data=data, headers=headers)
        if request.status_code != 204:
            raise Exception('{0}: Failed to block person'
                            .format(request.status_code))

    def subscribe(self):
        """Subscribe to notifications about this post.

        -> POST /posts/123/participation HTTP/1.1
        <- HTTP/1.1 201 Created
        """
        headers = {'x-csrf-token': repr(self._connection)}
        data = {}
        request = self._connection.post('posts/{}/participation'
                                        .format( self.id ), data=data, headers=headers)
        if request.status_code != 201:
            raise Exception('{0}: Failed to subscribe to post'
                            .format(request.status_code))

    def unsubscribe(self):
        """Cancel a previous subscribe().

        -> POST /posts/123/participation HTTP/1.1
           _method=delete
        <- HTTP/1.1 200 OK
        """
        headers = {'x-csrf-token': repr(self._connection)}
        data = { "_method": "delete" }
        request = self._connection.post('posts/{}/participation'
                                        .format( self.id ), headers=headers, data=data)
        if request.status_code != 200:
            raise Exception('{0}: Failed to unsubscribe to post'
                            .format(request.status_code))

    def report(self):
        """
        TODO
        """
        pass

    def delete(self):
        """ This function deletes this post
        """
        data = {'authenticity_token': repr(self._connection)}
        request = self._connection.delete('posts/{0}'.format(self.id),
                                          data=data,
                                          headers={'accept': 'application/json'})
        if request.status_code != 204:
            raise errors.PostError('{0}: Post could not be deleted'.format(request.status_code))

    def delete_comment(self, comment_id):
        """This function removes a comment from a post

        :param comment_id: id of the comment to remove.
        :type comment_id: str
        """
        data = {'authenticity_token': repr(self._connection)}
        request = self._connection.delete('posts/{0}/comments/{1}'
                                          .format(self.id, comment_id),
                                          data=data,
                                          headers={'accept': 'application/json'})

        if request.status_code != 204:
            raise errors.PostError('{0}: Comment could not be deleted'
                                   .format(request.status_code))

    def delete_like(self):
        """This function removes a like from a post
        """
        data = {'authenticity_token': repr(self._connection)}
        # relies on the likes cache populated by like() or _fetchdata()
        url = 'posts/{0}/likes/{1}'.format(self.id, self._data['interactions']['likes'][0]['id'])
        request = self._connection.delete(url, data=data)
        if request.status_code != 204:
            raise errors.PostError('{0}: Like could not be removed.'
                                   .format(request.status_code))

    def author(self, key='name'):
        """Returns author of the post.

        :param key: all keys available in data['author']
        """
        return self._data['author'][key]
| mit |
monouno/site | judge/migrations/0001_initial.py | 3 | 49547 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import mptt.fields
from django.conf import settings
from django.db import models, migrations
import judge.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name=b'Post title')),
('slug', models.SlugField(verbose_name=b'Slug')),
('visible', models.BooleanField(verbose_name=b'Public visibility')),
('sticky', models.BooleanField(verbose_name=b'Sticky')),
('publish_on', models.DateTimeField(verbose_name=b'Publish after')),
('content', models.TextField(verbose_name=b'Post content')),
('summary', models.TextField(verbose_name=b'Post summary', blank=True)),
],
options={
'permissions': (('see_hidden_post', 'See hidden posts'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.DateTimeField(auto_now_add=True, verbose_name=b'Posted time')),
('page', models.CharField(max_length=30, verbose_name=b'Associated Page', validators=[django.core.validators.RegexValidator(b'^[pc]:[a-z0-9]+$|^b:\\d+$|^s:', b'Page code must be ^[pc]:[a-z0-9]+$|^b:\\d+$')])),
('score', models.IntegerField(default=0, verbose_name=b'Votes')),
('title', models.CharField(max_length=200, verbose_name=b'Title of comment')),
('body', models.TextField(verbose_name=b'Body of comment', blank=True)),
('hidden', models.BooleanField(default=0, verbose_name=b'Hide the comment')),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CommentVote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('score', models.IntegerField()),
('comment', models.ForeignKey(to='judge.Comment', related_name='votes')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Contest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(unique=True, max_length=20, verbose_name=b'Contest id', validators=[django.core.validators.RegexValidator(b'^[a-z0-9]+$', b'Contest id must be ^[a-z0-9]+$')])),
('name', models.CharField(max_length=100, verbose_name=b'Contest name', db_index=True)),
('description', models.TextField(blank=True)),
('start_time', models.DateTimeField(db_index=True)),
('end_time', models.DateTimeField(db_index=True)),
('time_limit', models.DurationField()),
('is_public', models.BooleanField(default=False, verbose_name=b'Publicly visible')),
('is_external', models.BooleanField(default=False, verbose_name=b'External contest')),
('is_rated', models.BooleanField(default=False, help_text=b'Whether this contest can be rated.')),
('rate_all', models.BooleanField(default=False, help_text=b'Rate all users who joined.')),
],
options={
'permissions': (('see_private_contest', 'See private contests'), ('edit_own_contest', 'Edit own contests'), ('edit_all_contest', 'Edit all contests'), ('contest_rating', 'Rate contests')),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ContestParticipation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('real_start', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Start time', db_column=b'start')),
('score', models.IntegerField(default=0, verbose_name=b'score', db_index=True)),
('cumtime', models.PositiveIntegerField(default=0, verbose_name=b'Cumulative time')),
('contest', models.ForeignKey(related_name='users', verbose_name=b'Associated contest', to='judge.Contest')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ContestProblem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('points', models.IntegerField()),
('partial', models.BooleanField()),
('contest', models.ForeignKey(related_name='contest_problems', to='judge.Contest')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ContestProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('current', models.OneToOneField(related_name='+', null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='judge.ContestParticipation', verbose_name=b'Current contest')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ContestSubmission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('points', models.FloatField(default=0.0)),
('participation', models.ForeignKey(related_query_name=b'submission', related_name='submissions', to='judge.ContestParticipation')),
('problem', models.ForeignKey(related_query_name=b'submission', related_name='submissions', to='judge.ContestProblem')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Judge',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'Server name, hostname-style', unique=True, max_length=50)),
('created', models.DateTimeField(auto_now_add=True)),
('auth_key', models.CharField(help_text=b'A key to authenticated this judge', max_length=100, verbose_name=b'Authentication key')),
('online', models.BooleanField(default=False)),
('last_connect', models.DateTimeField(null=True, verbose_name=b'Last connection time')),
('ping', models.FloatField(null=True, verbose_name=b'Response time')),
('load', models.FloatField(help_text=b'Load for the last minute, divided by processors to be fair.', null=True, verbose_name=b'System load')),
('description', models.TextField(blank=True)),
],
options={
'ordering': ['-online', 'load'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(unique=True, max_length=6, verbose_name=b'Short identifier')),
('name', models.CharField(max_length=20, verbose_name=b'Long name')),
('short_name', models.CharField(max_length=10, null=True, verbose_name=b'Short name', blank=True)),
('common_name', models.CharField(max_length=10, verbose_name=b'Common name')),
('ace', models.CharField(max_length=20, verbose_name=b'ACE mode name')),
('pygments', models.CharField(max_length=20, verbose_name=b'Pygments Name')),
('info', models.CharField(max_length=50, verbose_name=b'Basic runtime info', blank=True)),
('description', models.TextField(verbose_name=b'Description for model', blank=True)),
],
options={
'ordering': ['key'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MiscConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(max_length=30, db_index=True)),
('value', models.TextField(blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NavigationBar',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.PositiveIntegerField(db_index=True)),
('key', models.CharField(unique=True, max_length=10, verbose_name=b'Identifier')),
('label', models.CharField(max_length=20)),
('path', models.CharField(max_length=30, verbose_name=b'Link path')),
('regex', models.TextField(verbose_name=b'Highlight regex', validators=[judge.models.validate_regex])),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', mptt.fields.TreeForeignKey(related_name='children', verbose_name=b'Parent item', blank=True, to='judge.NavigationBar', null=True)),
],
options={
'verbose_name': 'navigation item',
'verbose_name_plural': 'navigation bar',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name=b'Organization title')),
('key', models.CharField(help_text=b'Organization name shows in URL', unique=True, max_length=6, verbose_name=b'Identifier', validators=[django.core.validators.RegexValidator(b'^[A-Za-z0-9]+$', b'Identifier must contain letters and numbers only')])),
('short_name', models.CharField(help_text=b'Displayed beside user name during contests', max_length=20, verbose_name=b'Short name')),
('about', models.TextField(verbose_name=b'Organization description')),
('creation_date', models.DateTimeField(auto_now_add=True, verbose_name=b'Creation date')),
],
options={
'ordering': ['key'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PrivateMessage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50, verbose_name=b'Message title')),
('content', models.TextField(verbose_name=b'Message body')),
('timestamp', models.DateTimeField(auto_now_add=True, verbose_name=b'Message timestamp')),
('read', models.BooleanField(verbose_name=b'Read')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PrivateMessageThread',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('messages', models.ManyToManyField(to='judge.PrivateMessage', verbose_name=b'Messages in the thread')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Problem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('code', models.CharField(unique=True, max_length=20, verbose_name=b'Problem code', validators=[django.core.validators.RegexValidator(b'^[a-z0-9]+$', b'Problem code must be ^[a-z0-9]+$')])),
('name', models.CharField(max_length=100, verbose_name=b'Problem name', db_index=True)),
('description', models.TextField(verbose_name=b'Problem body')),
('time_limit', models.FloatField(verbose_name=b'Time limit')),
('memory_limit', models.IntegerField(verbose_name=b'Memory limit')),
('short_circuit', models.BooleanField(default=False)),
('points', models.FloatField(verbose_name=b'Points')),
('partial', models.BooleanField(verbose_name=b'Allows partial points')),
('is_public', models.BooleanField(db_index=True, verbose_name=b'Publicly visible')),
('date', models.DateTimeField(help_text=b"Doesn't have magic ability to auto-publish due to backward compatibility", null=True, verbose_name=b'Date of publishing', db_index=True, blank=True)),
('allowed_languages', models.ManyToManyField(to='judge.Language', verbose_name=b'Allowed languages')),
],
options={
'permissions': (('see_private_problem', 'See hidden problems'), ('edit_own_problem', 'Edit own problems'), ('edit_all_problem', 'Edit all problems'), ('clone_problem', 'Clone problem')),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProblemGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=20, verbose_name=b'Problem group ID')),
('full_name', models.CharField(max_length=100, verbose_name=b'Problem group name')),
],
options={
'ordering': ['full_name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProblemType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=20, verbose_name=b'Problem category ID')),
('full_name', models.CharField(max_length=100, verbose_name=b'Problem category name')),
],
options={
'ordering': ['full_name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, null=True, verbose_name=b'Display name', blank=True)),
('about', models.TextField(null=True, verbose_name=b'Self-description', blank=True)),
('timezone', models.CharField(default=b'America/Toronto', max_length=50, verbose_name=b'Location', choices=[(b'Africa', [(b'Africa/Abidjan', b'Abidjan'), (b'Africa/Accra', b'Accra'), (b'Africa/Addis_Ababa', b'Addis_Ababa'), (b'Africa/Algiers', b'Algiers'), (b'Africa/Asmara', b'Asmara'), (b'Africa/Asmera', b'Asmera'), (b'Africa/Bamako', b'Bamako'), (b'Africa/Bangui', b'Bangui'), (b'Africa/Banjul', b'Banjul'), (b'Africa/Bissau', b'Bissau'), (b'Africa/Blantyre', b'Blantyre'), (b'Africa/Brazzaville', b'Brazzaville'), (b'Africa/Bujumbura', b'Bujumbura'), (b'Africa/Cairo', b'Cairo'), (b'Africa/Casablanca', b'Casablanca'), (b'Africa/Ceuta', b'Ceuta'), (b'Africa/Conakry', b'Conakry'), (b'Africa/Dakar', b'Dakar'), (b'Africa/Dar_es_Salaam', b'Dar_es_Salaam'), (b'Africa/Djibouti', b'Djibouti'), (b'Africa/Douala', b'Douala'), (b'Africa/El_Aaiun', b'El_Aaiun'), (b'Africa/Freetown', b'Freetown'), (b'Africa/Gaborone', b'Gaborone'), (b'Africa/Harare', b'Harare'), (b'Africa/Johannesburg', b'Johannesburg'), (b'Africa/Juba', b'Juba'), (b'Africa/Kampala', b'Kampala'), (b'Africa/Khartoum', b'Khartoum'), (b'Africa/Kigali', b'Kigali'), (b'Africa/Kinshasa', b'Kinshasa'), (b'Africa/Lagos', b'Lagos'), (b'Africa/Libreville', b'Libreville'), (b'Africa/Lome', b'Lome'), (b'Africa/Luanda', b'Luanda'), (b'Africa/Lubumbashi', b'Lubumbashi'), (b'Africa/Lusaka', b'Lusaka'), (b'Africa/Malabo', b'Malabo'), (b'Africa/Maputo', b'Maputo'), (b'Africa/Maseru', b'Maseru'), (b'Africa/Mbabane', b'Mbabane'), (b'Africa/Mogadishu', b'Mogadishu'), (b'Africa/Monrovia', b'Monrovia'), (b'Africa/Nairobi', b'Nairobi'), (b'Africa/Ndjamena', b'Ndjamena'), (b'Africa/Niamey', b'Niamey'), (b'Africa/Nouakchott', b'Nouakchott'), (b'Africa/Ouagadougou', b'Ouagadougou'), (b'Africa/Porto-Novo', b'Porto-Novo'), (b'Africa/Sao_Tome', b'Sao_Tome'), (b'Africa/Timbuktu', b'Timbuktu'), (b'Africa/Tripoli', b'Tripoli'), (b'Africa/Tunis', b'Tunis'), (b'Africa/Windhoek', b'Windhoek')]), (b'America', [(b'America/Adak', b'Adak'), 
(b'America/Anchorage', b'Anchorage'), (b'America/Anguilla', b'Anguilla'), (b'America/Antigua', b'Antigua'), (b'America/Araguaina', b'Araguaina'), (b'America/Argentina/Buenos_Aires', b'Argentina/Buenos_Aires'), (b'America/Argentina/Catamarca', b'Argentina/Catamarca'), (b'America/Argentina/ComodRivadavia', b'Argentina/ComodRivadavia'), (b'America/Argentina/Cordoba', b'Argentina/Cordoba'), (b'America/Argentina/Jujuy', b'Argentina/Jujuy'), (b'America/Argentina/La_Rioja', b'Argentina/La_Rioja'), (b'America/Argentina/Mendoza', b'Argentina/Mendoza'), (b'America/Argentina/Rio_Gallegos', b'Argentina/Rio_Gallegos'), (b'America/Argentina/Salta', b'Argentina/Salta'), (b'America/Argentina/San_Juan', b'Argentina/San_Juan'), (b'America/Argentina/San_Luis', b'Argentina/San_Luis'), (b'America/Argentina/Tucuman', b'Argentina/Tucuman'), (b'America/Argentina/Ushuaia', b'Argentina/Ushuaia'), (b'America/Aruba', b'Aruba'), (b'America/Asuncion', b'Asuncion'), (b'America/Atikokan', b'Atikokan'), (b'America/Atka', b'Atka'), (b'America/Bahia', b'Bahia'), (b'America/Bahia_Banderas', b'Bahia_Banderas'), (b'America/Barbados', b'Barbados'), (b'America/Belem', b'Belem'), (b'America/Belize', b'Belize'), (b'America/Blanc-Sablon', b'Blanc-Sablon'), (b'America/Boa_Vista', b'Boa_Vista'), (b'America/Bogota', b'Bogota'), (b'America/Boise', b'Boise'), (b'America/Buenos_Aires', b'Buenos_Aires'), (b'America/Cambridge_Bay', b'Cambridge_Bay'), (b'America/Campo_Grande', b'Campo_Grande'), (b'America/Cancun', b'Cancun'), (b'America/Caracas', b'Caracas'), (b'America/Catamarca', b'Catamarca'), (b'America/Cayenne', b'Cayenne'), (b'America/Cayman', b'Cayman'), (b'America/Chicago', b'Chicago'), (b'America/Chihuahua', b'Chihuahua'), (b'America/Coral_Harbour', b'Coral_Harbour'), (b'America/Cordoba', b'Cordoba'), (b'America/Costa_Rica', b'Costa_Rica'), (b'America/Creston', b'Creston'), (b'America/Cuiaba', b'Cuiaba'), (b'America/Curacao', b'Curacao'), (b'America/Danmarkshavn', b'Danmarkshavn'), (b'America/Dawson', 
b'Dawson'), (b'America/Dawson_Creek', b'Dawson_Creek'), (b'America/Denver', b'Denver'), (b'America/Detroit', b'Detroit'), (b'America/Dominica', b'Dominica'), (b'America/Edmonton', b'Edmonton'), (b'America/Eirunepe', b'Eirunepe'), (b'America/El_Salvador', b'El_Salvador'), (b'America/Ensenada', b'Ensenada'), (b'America/Fort_Wayne', b'Fort_Wayne'), (b'America/Fortaleza', b'Fortaleza'), (b'America/Glace_Bay', b'Glace_Bay'), (b'America/Godthab', b'Godthab'), (b'America/Goose_Bay', b'Goose_Bay'), (b'America/Grand_Turk', b'Grand_Turk'), (b'America/Grenada', b'Grenada'), (b'America/Guadeloupe', b'Guadeloupe'), (b'America/Guatemala', b'Guatemala'), (b'America/Guayaquil', b'Guayaquil'), (b'America/Guyana', b'Guyana'), (b'America/Halifax', b'Halifax'), (b'America/Havana', b'Havana'), (b'America/Hermosillo', b'Hermosillo'), (b'America/Indiana/Indianapolis', b'Indiana/Indianapolis'), (b'America/Indiana/Knox', b'Indiana/Knox'), (b'America/Indiana/Marengo', b'Indiana/Marengo'), (b'America/Indiana/Petersburg', b'Indiana/Petersburg'), (b'America/Indiana/Tell_City', b'Indiana/Tell_City'), (b'America/Indiana/Vevay', b'Indiana/Vevay'), (b'America/Indiana/Vincennes', b'Indiana/Vincennes'), (b'America/Indiana/Winamac', b'Indiana/Winamac'), (b'America/Indianapolis', b'Indianapolis'), (b'America/Inuvik', b'Inuvik'), (b'America/Iqaluit', b'Iqaluit'), (b'America/Jamaica', b'Jamaica'), (b'America/Jujuy', b'Jujuy'), (b'America/Juneau', b'Juneau'), (b'America/Kentucky/Louisville', b'Kentucky/Louisville'), (b'America/Kentucky/Monticello', b'Kentucky/Monticello'), (b'America/Knox_IN', b'Knox_IN'), (b'America/Kralendijk', b'Kralendijk'), (b'America/La_Paz', b'La_Paz'), (b'America/Lima', b'Lima'), (b'America/Los_Angeles', b'Los_Angeles'), (b'America/Louisville', b'Louisville'), (b'America/Lower_Princes', b'Lower_Princes'), (b'America/Maceio', b'Maceio'), (b'America/Managua', b'Managua'), (b'America/Manaus', b'Manaus'), (b'America/Marigot', b'Marigot'), (b'America/Martinique', b'Martinique'), 
(b'America/Matamoros', b'Matamoros'), (b'America/Mazatlan', b'Mazatlan'), (b'America/Mendoza', b'Mendoza'), (b'America/Menominee', b'Menominee'), (b'America/Merida', b'Merida'), (b'America/Metlakatla', b'Metlakatla'), (b'America/Mexico_City', b'Mexico_City'), (b'America/Miquelon', b'Miquelon'), (b'America/Moncton', b'Moncton'), (b'America/Monterrey', b'Monterrey'), (b'America/Montevideo', b'Montevideo'), (b'America/Montreal', b'Montreal'), (b'America/Montserrat', b'Montserrat'), (b'America/Nassau', b'Nassau'), (b'America/New_York', b'New_York'), (b'America/Nipigon', b'Nipigon'), (b'America/Nome', b'Nome'), (b'America/Noronha', b'Noronha'), (b'America/North_Dakota/Beulah', b'North_Dakota/Beulah'), (b'America/North_Dakota/Center', b'North_Dakota/Center'), (b'America/North_Dakota/New_Salem', b'North_Dakota/New_Salem'), (b'America/Ojinaga', b'Ojinaga'), (b'America/Panama', b'Panama'), (b'America/Pangnirtung', b'Pangnirtung'), (b'America/Paramaribo', b'Paramaribo'), (b'America/Phoenix', b'Phoenix'), (b'America/Port-au-Prince', b'Port-au-Prince'), (b'America/Port_of_Spain', b'Port_of_Spain'), (b'America/Porto_Acre', b'Porto_Acre'), (b'America/Porto_Velho', b'Porto_Velho'), (b'America/Puerto_Rico', b'Puerto_Rico'), (b'America/Rainy_River', b'Rainy_River'), (b'America/Rankin_Inlet', b'Rankin_Inlet'), (b'America/Recife', b'Recife'), (b'America/Regina', b'Regina'), (b'America/Resolute', b'Resolute'), (b'America/Rio_Branco', b'Rio_Branco'), (b'America/Rosario', b'Rosario'), (b'America/Santa_Isabel', b'Santa_Isabel'), (b'America/Santarem', b'Santarem'), (b'America/Santiago', b'Santiago'), (b'America/Santo_Domingo', b'Santo_Domingo'), (b'America/Sao_Paulo', b'Sao_Paulo'), (b'America/Scoresbysund', b'Scoresbysund'), (b'America/Shiprock', b'Shiprock'), (b'America/Sitka', b'Sitka'), (b'America/St_Barthelemy', b'St_Barthelemy'), (b'America/St_Johns', b'St_Johns'), (b'America/St_Kitts', b'St_Kitts'), (b'America/St_Lucia', b'St_Lucia'), (b'America/St_Thomas', b'St_Thomas'), 
(b'America/St_Vincent', b'St_Vincent'), (b'America/Swift_Current', b'Swift_Current'), (b'America/Tegucigalpa', b'Tegucigalpa'), (b'America/Thule', b'Thule'), (b'America/Thunder_Bay', b'Thunder_Bay'), (b'America/Tijuana', b'Tijuana'), (b'America/Toronto', b'Toronto'), (b'America/Tortola', b'Tortola'), (b'America/Vancouver', b'Vancouver'), (b'America/Virgin', b'Virgin'), (b'America/Whitehorse', b'Whitehorse'), (b'America/Winnipeg', b'Winnipeg'), (b'America/Yakutat', b'Yakutat'), (b'America/Yellowknife', b'Yellowknife')]), (b'Antarctica', [(b'Antarctica/Casey', b'Casey'), (b'Antarctica/Davis', b'Davis'), (b'Antarctica/DumontDUrville', b'DumontDUrville'), (b'Antarctica/Macquarie', b'Macquarie'), (b'Antarctica/Mawson', b'Mawson'), (b'Antarctica/McMurdo', b'McMurdo'), (b'Antarctica/Palmer', b'Palmer'), (b'Antarctica/Rothera', b'Rothera'), (b'Antarctica/South_Pole', b'South_Pole'), (b'Antarctica/Syowa', b'Syowa'), (b'Antarctica/Troll', b'Troll'), (b'Antarctica/Vostok', b'Vostok')]), (b'Arctic', [(b'Arctic/Longyearbyen', b'Longyearbyen')]), (b'Asia', [(b'Asia/Aden', b'Aden'), (b'Asia/Almaty', b'Almaty'), (b'Asia/Amman', b'Amman'), (b'Asia/Anadyr', b'Anadyr'), (b'Asia/Aqtau', b'Aqtau'), (b'Asia/Aqtobe', b'Aqtobe'), (b'Asia/Ashgabat', b'Ashgabat'), (b'Asia/Ashkhabad', b'Ashkhabad'), (b'Asia/Baghdad', b'Baghdad'), (b'Asia/Bahrain', b'Bahrain'), (b'Asia/Baku', b'Baku'), (b'Asia/Bangkok', b'Bangkok'), (b'Asia/Beirut', b'Beirut'), (b'Asia/Bishkek', b'Bishkek'), (b'Asia/Brunei', b'Brunei'), (b'Asia/Calcutta', b'Calcutta'), (b'Asia/Chita', b'Chita'), (b'Asia/Choibalsan', b'Choibalsan'), (b'Asia/Chongqing', b'Chongqing'), (b'Asia/Chungking', b'Chungking'), (b'Asia/Colombo', b'Colombo'), (b'Asia/Dacca', b'Dacca'), (b'Asia/Damascus', b'Damascus'), (b'Asia/Dhaka', b'Dhaka'), (b'Asia/Dili', b'Dili'), (b'Asia/Dubai', b'Dubai'), (b'Asia/Dushanbe', b'Dushanbe'), (b'Asia/Gaza', b'Gaza'), (b'Asia/Harbin', b'Harbin'), (b'Asia/Hebron', b'Hebron'), (b'Asia/Ho_Chi_Minh', b'Ho_Chi_Minh'), 
(b'Asia/Hong_Kong', b'Hong_Kong'), (b'Asia/Hovd', b'Hovd'), (b'Asia/Irkutsk', b'Irkutsk'), (b'Asia/Istanbul', b'Istanbul'), (b'Asia/Jakarta', b'Jakarta'), (b'Asia/Jayapura', b'Jayapura'), (b'Asia/Jerusalem', b'Jerusalem'), (b'Asia/Kabul', b'Kabul'), (b'Asia/Kamchatka', b'Kamchatka'), (b'Asia/Karachi', b'Karachi'), (b'Asia/Kashgar', b'Kashgar'), (b'Asia/Kathmandu', b'Kathmandu'), (b'Asia/Katmandu', b'Katmandu'), (b'Asia/Khandyga', b'Khandyga'), (b'Asia/Kolkata', b'Kolkata'), (b'Asia/Krasnoyarsk', b'Krasnoyarsk'), (b'Asia/Kuala_Lumpur', b'Kuala_Lumpur'), (b'Asia/Kuching', b'Kuching'), (b'Asia/Kuwait', b'Kuwait'), (b'Asia/Macao', b'Macao'), (b'Asia/Macau', b'Macau'), (b'Asia/Magadan', b'Magadan'), (b'Asia/Makassar', b'Makassar'), (b'Asia/Manila', b'Manila'), (b'Asia/Muscat', b'Muscat'), (b'Asia/Nicosia', b'Nicosia'), (b'Asia/Novokuznetsk', b'Novokuznetsk'), (b'Asia/Novosibirsk', b'Novosibirsk'), (b'Asia/Omsk', b'Omsk'), (b'Asia/Oral', b'Oral'), (b'Asia/Phnom_Penh', b'Phnom_Penh'), (b'Asia/Pontianak', b'Pontianak'), (b'Asia/Pyongyang', b'Pyongyang'), (b'Asia/Qatar', b'Qatar'), (b'Asia/Qyzylorda', b'Qyzylorda'), (b'Asia/Rangoon', b'Rangoon'), (b'Asia/Riyadh', b'Riyadh'), (b'Asia/Saigon', b'Saigon'), (b'Asia/Sakhalin', b'Sakhalin'), (b'Asia/Samarkand', b'Samarkand'), (b'Asia/Seoul', b'Seoul'), (b'Asia/Shanghai', b'Shanghai'), (b'Asia/Singapore', b'Singapore'), (b'Asia/Srednekolymsk', b'Srednekolymsk'), (b'Asia/Taipei', b'Taipei'), (b'Asia/Tashkent', b'Tashkent'), (b'Asia/Tbilisi', b'Tbilisi'), (b'Asia/Tehran', b'Tehran'), (b'Asia/Tel_Aviv', b'Tel_Aviv'), (b'Asia/Thimbu', b'Thimbu'), (b'Asia/Thimphu', b'Thimphu'), (b'Asia/Tokyo', b'Tokyo'), (b'Asia/Ujung_Pandang', b'Ujung_Pandang'), (b'Asia/Ulaanbaatar', b'Ulaanbaatar'), (b'Asia/Ulan_Bator', b'Ulan_Bator'), (b'Asia/Urumqi', b'Urumqi'), (b'Asia/Ust-Nera', b'Ust-Nera'), (b'Asia/Vientiane', b'Vientiane'), (b'Asia/Vladivostok', b'Vladivostok'), (b'Asia/Yakutsk', b'Yakutsk'), (b'Asia/Yekaterinburg', b'Yekaterinburg'), 
(b'Asia/Yerevan', b'Yerevan')]), (b'Atlantic', [(b'Atlantic/Azores', b'Azores'), (b'Atlantic/Bermuda', b'Bermuda'), (b'Atlantic/Canary', b'Canary'), (b'Atlantic/Cape_Verde', b'Cape_Verde'), (b'Atlantic/Faeroe', b'Faeroe'), (b'Atlantic/Faroe', b'Faroe'), (b'Atlantic/Jan_Mayen', b'Jan_Mayen'), (b'Atlantic/Madeira', b'Madeira'), (b'Atlantic/Reykjavik', b'Reykjavik'), (b'Atlantic/South_Georgia', b'South_Georgia'), (b'Atlantic/St_Helena', b'St_Helena'), (b'Atlantic/Stanley', b'Stanley')]), (b'Australia', [(b'Australia/ACT', b'ACT'), (b'Australia/Adelaide', b'Adelaide'), (b'Australia/Brisbane', b'Brisbane'), (b'Australia/Broken_Hill', b'Broken_Hill'), (b'Australia/Canberra', b'Canberra'), (b'Australia/Currie', b'Currie'), (b'Australia/Darwin', b'Darwin'), (b'Australia/Eucla', b'Eucla'), (b'Australia/Hobart', b'Hobart'), (b'Australia/LHI', b'LHI'), (b'Australia/Lindeman', b'Lindeman'), (b'Australia/Lord_Howe', b'Lord_Howe'), (b'Australia/Melbourne', b'Melbourne'), (b'Australia/NSW', b'NSW'), (b'Australia/North', b'North'), (b'Australia/Perth', b'Perth'), (b'Australia/Queensland', b'Queensland'), (b'Australia/South', b'South'), (b'Australia/Sydney', b'Sydney'), (b'Australia/Tasmania', b'Tasmania'), (b'Australia/Victoria', b'Victoria'), (b'Australia/West', b'West'), (b'Australia/Yancowinna', b'Yancowinna')]), (b'Brazil', [(b'Brazil/Acre', b'Acre'), (b'Brazil/DeNoronha', b'DeNoronha'), (b'Brazil/East', b'East'), (b'Brazil/West', b'West')]), (b'Canada', [(b'Canada/Atlantic', b'Atlantic'), (b'Canada/Central', b'Central'), (b'Canada/East-Saskatchewan', b'East-Saskatchewan'), (b'Canada/Eastern', b'Eastern'), (b'Canada/Mountain', b'Mountain'), (b'Canada/Newfoundland', b'Newfoundland'), (b'Canada/Pacific', b'Pacific'), (b'Canada/Saskatchewan', b'Saskatchewan'), (b'Canada/Yukon', b'Yukon')]), (b'Chile', [(b'Chile/Continental', b'Continental'), (b'Chile/EasterIsland', b'EasterIsland')]), (b'Etc', [(b'Etc/Greenwich', b'Greenwich'), (b'Etc/UCT', b'UCT'), (b'Etc/UTC', b'UTC'), 
(b'Etc/Universal', b'Universal'), (b'Etc/Zulu', b'Zulu')]), (b'Europe', [(b'Europe/Amsterdam', b'Amsterdam'), (b'Europe/Andorra', b'Andorra'), (b'Europe/Athens', b'Athens'), (b'Europe/Belfast', b'Belfast'), (b'Europe/Belgrade', b'Belgrade'), (b'Europe/Berlin', b'Berlin'), (b'Europe/Bratislava', b'Bratislava'), (b'Europe/Brussels', b'Brussels'), (b'Europe/Bucharest', b'Bucharest'), (b'Europe/Budapest', b'Budapest'), (b'Europe/Busingen', b'Busingen'), (b'Europe/Chisinau', b'Chisinau'), (b'Europe/Copenhagen', b'Copenhagen'), (b'Europe/Dublin', b'Dublin'), (b'Europe/Gibraltar', b'Gibraltar'), (b'Europe/Guernsey', b'Guernsey'), (b'Europe/Helsinki', b'Helsinki'), (b'Europe/Isle_of_Man', b'Isle_of_Man'), (b'Europe/Istanbul', b'Istanbul'), (b'Europe/Jersey', b'Jersey'), (b'Europe/Kaliningrad', b'Kaliningrad'), (b'Europe/Kiev', b'Kiev'), (b'Europe/Lisbon', b'Lisbon'), (b'Europe/Ljubljana', b'Ljubljana'), (b'Europe/London', b'London'), (b'Europe/Luxembourg', b'Luxembourg'), (b'Europe/Madrid', b'Madrid'), (b'Europe/Malta', b'Malta'), (b'Europe/Mariehamn', b'Mariehamn'), (b'Europe/Minsk', b'Minsk'), (b'Europe/Monaco', b'Monaco'), (b'Europe/Moscow', b'Moscow'), (b'Europe/Nicosia', b'Nicosia'), (b'Europe/Oslo', b'Oslo'), (b'Europe/Paris', b'Paris'), (b'Europe/Podgorica', b'Podgorica'), (b'Europe/Prague', b'Prague'), (b'Europe/Riga', b'Riga'), (b'Europe/Rome', b'Rome'), (b'Europe/Samara', b'Samara'), (b'Europe/San_Marino', b'San_Marino'), (b'Europe/Sarajevo', b'Sarajevo'), (b'Europe/Simferopol', b'Simferopol'), (b'Europe/Skopje', b'Skopje'), (b'Europe/Sofia', b'Sofia'), (b'Europe/Stockholm', b'Stockholm'), (b'Europe/Tallinn', b'Tallinn'), (b'Europe/Tirane', b'Tirane'), (b'Europe/Tiraspol', b'Tiraspol'), (b'Europe/Uzhgorod', b'Uzhgorod'), (b'Europe/Vaduz', b'Vaduz'), (b'Europe/Vatican', b'Vatican'), (b'Europe/Vienna', b'Vienna'), (b'Europe/Vilnius', b'Vilnius'), (b'Europe/Volgograd', b'Volgograd'), (b'Europe/Warsaw', b'Warsaw'), (b'Europe/Zagreb', b'Zagreb'), 
(b'Europe/Zaporozhye', b'Zaporozhye'), (b'Europe/Zurich', b'Zurich')]), (b'Indian', [(b'Indian/Antananarivo', b'Antananarivo'), (b'Indian/Chagos', b'Chagos'), (b'Indian/Christmas', b'Christmas'), (b'Indian/Cocos', b'Cocos'), (b'Indian/Comoro', b'Comoro'), (b'Indian/Kerguelen', b'Kerguelen'), (b'Indian/Mahe', b'Mahe'), (b'Indian/Maldives', b'Maldives'), (b'Indian/Mauritius', b'Mauritius'), (b'Indian/Mayotte', b'Mayotte'), (b'Indian/Reunion', b'Reunion')]), (b'Mexico', [(b'Mexico/BajaNorte', b'BajaNorte'), (b'Mexico/BajaSur', b'BajaSur'), (b'Mexico/General', b'General')]), (b'Other', [(b'CET', b'CET'), (b'CST6CDT', b'CST6CDT'), (b'Cuba', b'Cuba'), (b'EET', b'EET'), (b'EST', b'EST'), (b'EST5EDT', b'EST5EDT'), (b'Egypt', b'Egypt'), (b'Eire', b'Eire'), (b'GB', b'GB'), (b'GB-Eire', b'GB-Eire'), (b'Greenwich', b'Greenwich'), (b'HST', b'HST'), (b'Hongkong', b'Hongkong'), (b'Iceland', b'Iceland'), (b'Iran', b'Iran'), (b'Israel', b'Israel'), (b'Jamaica', b'Jamaica'), (b'Japan', b'Japan'), (b'Kwajalein', b'Kwajalein'), (b'Libya', b'Libya'), (b'MET', b'MET'), (b'MST', b'MST'), (b'MST7MDT', b'MST7MDT'), (b'NZ', b'NZ'), (b'NZ-CHAT', b'NZ-CHAT'), (b'Navajo', b'Navajo'), (b'PRC', b'PRC'), (b'PST8PDT', b'PST8PDT'), (b'Poland', b'Poland'), (b'Portugal', b'Portugal'), (b'ROC', b'ROC'), (b'ROK', b'ROK'), (b'Singapore', b'Singapore'), (b'Turkey', b'Turkey'), (b'UCT', b'UCT'), (b'UTC', b'UTC'), (b'Universal', b'Universal'), (b'W-SU', b'W-SU'), (b'WET', b'WET'), (b'Zulu', b'Zulu')]), (b'Pacific', [(b'Pacific/Apia', b'Apia'), (b'Pacific/Auckland', b'Auckland'), (b'Pacific/Chatham', b'Chatham'), (b'Pacific/Chuuk', b'Chuuk'), (b'Pacific/Easter', b'Easter'), (b'Pacific/Efate', b'Efate'), (b'Pacific/Enderbury', b'Enderbury'), (b'Pacific/Fakaofo', b'Fakaofo'), (b'Pacific/Fiji', b'Fiji'), (b'Pacific/Funafuti', b'Funafuti'), (b'Pacific/Galapagos', b'Galapagos'), (b'Pacific/Gambier', b'Gambier'), (b'Pacific/Guadalcanal', b'Guadalcanal'), (b'Pacific/Guam', b'Guam'), (b'Pacific/Honolulu', 
b'Honolulu'), (b'Pacific/Johnston', b'Johnston'), (b'Pacific/Kiritimati', b'Kiritimati'), (b'Pacific/Kosrae', b'Kosrae'), (b'Pacific/Kwajalein', b'Kwajalein'), (b'Pacific/Majuro', b'Majuro'), (b'Pacific/Marquesas', b'Marquesas'), (b'Pacific/Midway', b'Midway'), (b'Pacific/Nauru', b'Nauru'), (b'Pacific/Niue', b'Niue'), (b'Pacific/Norfolk', b'Norfolk'), (b'Pacific/Noumea', b'Noumea'), (b'Pacific/Pago_Pago', b'Pago_Pago'), (b'Pacific/Palau', b'Palau'), (b'Pacific/Pitcairn', b'Pitcairn'), (b'Pacific/Pohnpei', b'Pohnpei'), (b'Pacific/Ponape', b'Ponape'), (b'Pacific/Port_Moresby', b'Port_Moresby'), (b'Pacific/Rarotonga', b'Rarotonga'), (b'Pacific/Saipan', b'Saipan'), (b'Pacific/Samoa', b'Samoa'), (b'Pacific/Tahiti', b'Tahiti'), (b'Pacific/Tarawa', b'Tarawa'), (b'Pacific/Tongatapu', b'Tongatapu'), (b'Pacific/Truk', b'Truk'), (b'Pacific/Wake', b'Wake'), (b'Pacific/Wallis', b'Wallis'), (b'Pacific/Yap', b'Yap')]), (b'US', [(b'US/Alaska', b'Alaska'), (b'US/Aleutian', b'Aleutian'), (b'US/Arizona', b'Arizona'), (b'US/Central', b'Central'), (b'US/East-Indiana', b'East-Indiana'), (b'US/Eastern', b'Eastern'), (b'US/Hawaii', b'Hawaii'), (b'US/Indiana-Starke', b'Indiana-Starke'), (b'US/Michigan', b'Michigan'), (b'US/Mountain', b'Mountain'), (b'US/Pacific', b'Pacific'), (b'US/Pacific-New', b'Pacific-New'), (b'US/Samoa', b'Samoa')])])),
('points', models.FloatField(default=0, db_index=True)),
('ace_theme', models.CharField(default=b'github', max_length=30, choices=[(b'ambiance', b'Ambiance'), (b'chaos', b'Chaos'), (b'chrome', b'Chrome'), (b'clouds', b'Clouds'), (b'clouds_midnight', b'Clouds Midnight'), (b'cobalt', b'Cobalt'), (b'crimson_editor', b'Crimson Editor'), (b'dawn', b'Dawn'), (b'dreamweaver', b'Dreamweaver'), (b'eclipse', b'Eclipse'), (b'github', b'Github'), (b'idle_fingers', b'Idle Fingers'), (b'katzenmilch', b'Katzenmilch'), (b'kr_theme', b'KR Theme'), (b'kuroir', b'Kuroir'), (b'merbivore', b'Merbivore'), (b'merbivore_soft', b'Merbivore Soft'), (b'mono_industrial', b'Mono Industrial'), (b'monokai', b'Monokai'), (b'pastel_on_dark', b'Pastel on Dark'), (b'solarized_dark', b'Solarized Dark'), (b'solarized_light', b'Solarized Light'), (b'terminal', b'Terminal'), (b'textmate', b'Textmate'), (b'tomorrow', b'Tomorrow'), (b'tomorrow_night', b'Tomorrow Night'), (b'tomorrow_night_blue', b'Tomorrow Night Blue'), (b'tomorrow_night_bright', b'Tomorrow Night Bright'), (b'tomorrow_night_eighties', b'Tomorrow Night Eighties'), (b'twilight', b'Twilight'), (b'vibrant_ink', b'Vibrant Ink'), (b'xcode', b'XCode')])),
('last_access', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Last access time')),
('ip', models.GenericIPAddressField(null=True, verbose_name=b'Last IP', blank=True)),
('organization_join_time', models.DateTimeField(null=True, verbose_name=b'Organization joining date', blank=True)),
('display_rank', models.CharField(default=b'user', max_length=10, choices=[(b'user', b'Normal User'), (b'setter', b'Problem Setter'), (b'admin', b'Admin')])),
('mute', models.BooleanField(default=False, help_text=b'Some users are at their best when silent.', verbose_name=b'Comment mute')),
('rating', models.IntegerField(default=None, null=True)),
('language', models.ForeignKey(verbose_name=b'Preferred language', to='judge.Language')),
('organization', models.ForeignKey(related_query_name=b'member', related_name='members', on_delete=django.db.models.deletion.SET_NULL, verbose_name=b'Organization', blank=True, to='judge.Organization', null=True)),
('user', models.OneToOneField(verbose_name=b'User associated', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rank', models.IntegerField()),
('rating', models.IntegerField()),
('volatility', models.IntegerField()),
('last_rated', models.DateTimeField(db_index=True)),
('contest', models.ForeignKey(related_name='ratings', to='judge.Contest')),
('participation', models.OneToOneField(related_name='rating', to='judge.ContestParticipation')),
('user', models.ForeignKey(related_name='ratings', to='judge.Profile')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Solution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(db_index=True, max_length=100, verbose_name=b'URL', blank=True)),
('title', models.CharField(max_length=200)),
('is_public', models.BooleanField()),
('publish_on', models.DateTimeField()),
('content', models.TextField()),
('authors', models.ManyToManyField(to='judge.Profile', blank=True)),
],
options={
'permissions': (('see_private_solution', 'See hidden solutions'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(auto_now_add=True, verbose_name=b'Submission time')),
('time', models.FloatField(null=True, verbose_name=b'Execution time', db_index=True)),
('memory', models.FloatField(null=True, verbose_name=b'Memory usage')),
('points', models.FloatField(null=True, verbose_name=b'Points granted', db_index=True)),
('source', models.TextField(max_length=65536, verbose_name=b'Source code')),
('status', models.CharField(default=b'QU', max_length=2, db_index=True, choices=[(b'QU', b'Queued'), (b'P', b'Processing'), (b'G', b'Grading'), (b'D', b'Completed'), (b'IE', b'Internal Error'), (b'CE', b'Compile Error'), (b'AB', b'Aborted')])),
('result', models.CharField(default=None, choices=[(b'AC', b'Accepted'), (b'WA', b'Wrong Answer'), (b'TLE', b'Time Limit Exceeded'), (b'MLE', b'Memory Limit Exceeded'), (b'OLE', b'Output Limit Exceeded'), (b'IR', b'Invalid Return'), (b'RTE', b'Runtime Error'), (b'CE', b'Compile Error'), (b'IE', b'Internal Error'), (b'SC', b'Short circuit'), (b'AB', b'Aborted')], max_length=3, blank=True, null=True, db_index=True)),
('error', models.TextField(null=True, verbose_name=b'Compile Errors', blank=True)),
('current_testcase', models.IntegerField(default=0)),
('batch', models.BooleanField(default=False, verbose_name=b'Batched cases')),
('case_points', models.FloatField(default=0, verbose_name=b'Test case points')),
('case_total', models.FloatField(default=0, verbose_name=b'Test case total points')),
('language', models.ForeignKey(verbose_name=b'Submission language', to='judge.Language')),
('problem', models.ForeignKey(to='judge.Problem')),
('user', models.ForeignKey(to='judge.Profile')),
],
options={
'permissions': (('abort_any_submission', 'Abort any submission'), ('rejudge_submission', 'Rejudge the submission'), ('rejudge_submission_lot', 'Rejudge a lot of submissions'), ('spam_submission', 'Submit without limit'), ('view_all_submission', 'View all submission'), ('resubmit_other', "Resubmit others' submission")),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SubmissionTestCase',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('case', models.IntegerField(verbose_name=b'Test case ID')),
('status', models.CharField(max_length=3, verbose_name=b'Status flag', choices=[(b'AC', b'Accepted'), (b'WA', b'Wrong Answer'), (b'TLE', b'Time Limit Exceeded'), (b'MLE', b'Memory Limit Exceeded'), (b'OLE', b'Output Limit Exceeded'), (b'IR', b'Invalid Return'), (b'RTE', b'Runtime Error'), (b'CE', b'Compile Error'), (b'IE', b'Internal Error'), (b'SC', b'Short circuit'), (b'AB', b'Aborted')])),
('time', models.FloatField(null=True, verbose_name=b'Execution time')),
('memory', models.FloatField(null=True, verbose_name=b'Memory usage')),
('points', models.FloatField(null=True, verbose_name=b'Points granted')),
('total', models.FloatField(null=True, verbose_name=b'Points possible')),
('batch', models.IntegerField(null=True, verbose_name=b'Batch number')),
('feedback', models.CharField(max_length=50, verbose_name=b'Judging feedback', blank=True)),
('output', models.TextField(verbose_name=b'Program output', blank=True)),
('submission', models.ForeignKey(related_name='test_cases', verbose_name=b'Associated submission', to='judge.Submission')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='rating',
unique_together=set([('user', 'contest')]),
),
migrations.AddField(
model_name='problem',
name='authors',
field=models.ManyToManyField(related_name='authored_problems', verbose_name=b'Creators', to='judge.Profile', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='problem',
name='banned_users',
field=models.ManyToManyField(help_text=b'Bans the selected users from submitting to this problem', to='judge.Profile', verbose_name=b'Personae non gratae', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='problem',
name='group',
field=models.ForeignKey(verbose_name=b'Problem group', to='judge.ProblemGroup'),
preserve_default=True,
),
migrations.AddField(
model_name='problem',
name='types',
field=models.ManyToManyField(to='judge.ProblemType', verbose_name=b'Problem types'),
preserve_default=True,
),
migrations.AddField(
model_name='privatemessage',
name='sender',
field=models.ForeignKey(related_name='sent_messages', verbose_name=b'Sender', to='judge.Profile'),
preserve_default=True,
),
migrations.AddField(
model_name='privatemessage',
name='target',
field=models.ForeignKey(related_name='received_messages', verbose_name=b'Target', to='judge.Profile'),
preserve_default=True,
),
migrations.AddField(
model_name='organization',
name='admins',
field=models.ManyToManyField(help_text=b'Those who can edit this organization', related_name='+', verbose_name=b'Administrators', to='judge.Profile'),
preserve_default=True,
),
migrations.AddField(
model_name='organization',
name='registrant',
field=models.ForeignKey(related_name='registrant+', verbose_name=b'Registrant', to='judge.Profile', help_text=b'User who registered this organization'),
preserve_default=True,
),
migrations.AddField(
model_name='judge',
name='problems',
field=models.ManyToManyField(related_name='judges', to='judge.Problem'),
preserve_default=True,
),
migrations.AddField(
model_name='judge',
name='runtimes',
field=models.ManyToManyField(related_name='judges', to='judge.Language'),
preserve_default=True,
),
migrations.AddField(
model_name='contestsubmission',
name='submission',
field=models.OneToOneField(related_name='contest', to='judge.Submission'),
preserve_default=True,
),
migrations.AddField(
model_name='contestprofile',
name='user',
field=models.OneToOneField(related_query_name=b'contest', related_name='contest_profile', verbose_name=b'User', to='judge.Profile'),
preserve_default=True,
),
migrations.AddField(
model_name='contestproblem',
name='problem',
field=models.ForeignKey(related_name='contests', to='judge.Problem'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='contestproblem',
unique_together=set([('problem', 'contest')]),
),
migrations.AddField(
model_name='contestparticipation',
name='profile',
field=models.ForeignKey(related_name='history', verbose_name=b'User', to='judge.ContestProfile'),
preserve_default=True,
),
migrations.AddField(
model_name='contest',
name='organizers',
field=models.ManyToManyField(help_text=b'These people will be able to edit the contest.', related_name='organizers+', to='judge.Profile'),
preserve_default=True,
),
migrations.AddField(
model_name='contest',
name='problems',
field=models.ManyToManyField(to='judge.Problem', verbose_name=b'Problems', through='judge.ContestProblem'),
preserve_default=True,
),
migrations.AddField(
model_name='contest',
name='rate_exclude',
field=models.ManyToManyField(related_name='rate_exclude+', verbose_name=b'exclude from ratings', to='judge.Profile', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='commentvote',
name='voter',
field=models.ForeignKey(to='judge.Profile', related_name='voted_comments'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='commentvote',
unique_together=set([('voter', 'comment')]),
),
migrations.AddField(
model_name='comment',
name='author',
field=models.ForeignKey(verbose_name=b'Commenter', to='judge.Profile'),
preserve_default=True,
),
migrations.AddField(
model_name='comment',
name='parent',
field=mptt.fields.TreeForeignKey(related_name='replies', blank=True, to='judge.Comment', null=True),
preserve_default=True,
),
]
| agpl-3.0 |
agentxan/nzbToMedia | libs/unidecode/x0ad.py | 253 | 4766 | data = (
# ASCII transliterations for the Unicode block U+AD00..U+ADFF (Hangul
# syllables); the tuple index equals the low byte of the code point.
'gwan', # 0x00
'gwanj', # 0x01
'gwanh', # 0x02
'gwad', # 0x03
'gwal', # 0x04
'gwalg', # 0x05
'gwalm', # 0x06
'gwalb', # 0x07
'gwals', # 0x08
'gwalt', # 0x09
'gwalp', # 0x0a
'gwalh', # 0x0b
'gwam', # 0x0c
'gwab', # 0x0d
'gwabs', # 0x0e
'gwas', # 0x0f
'gwass', # 0x10
'gwang', # 0x11
'gwaj', # 0x12
'gwac', # 0x13
'gwak', # 0x14
'gwat', # 0x15
'gwap', # 0x16
'gwah', # 0x17
'gwae', # 0x18
'gwaeg', # 0x19
'gwaegg', # 0x1a
'gwaegs', # 0x1b
'gwaen', # 0x1c
'gwaenj', # 0x1d
'gwaenh', # 0x1e
'gwaed', # 0x1f
'gwael', # 0x20
'gwaelg', # 0x21
'gwaelm', # 0x22
'gwaelb', # 0x23
'gwaels', # 0x24
'gwaelt', # 0x25
'gwaelp', # 0x26
'gwaelh', # 0x27
'gwaem', # 0x28
'gwaeb', # 0x29
'gwaebs', # 0x2a
'gwaes', # 0x2b
'gwaess', # 0x2c
'gwaeng', # 0x2d
'gwaej', # 0x2e
'gwaec', # 0x2f
'gwaek', # 0x30
'gwaet', # 0x31
'gwaep', # 0x32
'gwaeh', # 0x33
'goe', # 0x34
'goeg', # 0x35
'goegg', # 0x36
'goegs', # 0x37
'goen', # 0x38
'goenj', # 0x39
'goenh', # 0x3a
'goed', # 0x3b
'goel', # 0x3c
'goelg', # 0x3d
'goelm', # 0x3e
'goelb', # 0x3f
'goels', # 0x40
'goelt', # 0x41
'goelp', # 0x42
'goelh', # 0x43
'goem', # 0x44
'goeb', # 0x45
'goebs', # 0x46
'goes', # 0x47
'goess', # 0x48
'goeng', # 0x49
'goej', # 0x4a
'goec', # 0x4b
'goek', # 0x4c
'goet', # 0x4d
'goep', # 0x4e
'goeh', # 0x4f
'gyo', # 0x50
'gyog', # 0x51
'gyogg', # 0x52
'gyogs', # 0x53
'gyon', # 0x54
'gyonj', # 0x55
'gyonh', # 0x56
'gyod', # 0x57
'gyol', # 0x58
'gyolg', # 0x59
'gyolm', # 0x5a
'gyolb', # 0x5b
'gyols', # 0x5c
'gyolt', # 0x5d
'gyolp', # 0x5e
'gyolh', # 0x5f
'gyom', # 0x60
'gyob', # 0x61
'gyobs', # 0x62
'gyos', # 0x63
'gyoss', # 0x64
'gyong', # 0x65
'gyoj', # 0x66
'gyoc', # 0x67
'gyok', # 0x68
'gyot', # 0x69
'gyop', # 0x6a
'gyoh', # 0x6b
'gu', # 0x6c
'gug', # 0x6d
'gugg', # 0x6e
'gugs', # 0x6f
'gun', # 0x70
'gunj', # 0x71
'gunh', # 0x72
'gud', # 0x73
'gul', # 0x74
'gulg', # 0x75
'gulm', # 0x76
'gulb', # 0x77
'guls', # 0x78
'gult', # 0x79
'gulp', # 0x7a
'gulh', # 0x7b
'gum', # 0x7c
'gub', # 0x7d
'gubs', # 0x7e
'gus', # 0x7f
'guss', # 0x80
'gung', # 0x81
'guj', # 0x82
'guc', # 0x83
'guk', # 0x84
'gut', # 0x85
'gup', # 0x86
'guh', # 0x87
'gweo', # 0x88
'gweog', # 0x89
'gweogg', # 0x8a
'gweogs', # 0x8b
'gweon', # 0x8c
'gweonj', # 0x8d
'gweonh', # 0x8e
'gweod', # 0x8f
'gweol', # 0x90
'gweolg', # 0x91
'gweolm', # 0x92
'gweolb', # 0x93
'gweols', # 0x94
'gweolt', # 0x95
'gweolp', # 0x96
'gweolh', # 0x97
'gweom', # 0x98
'gweob', # 0x99
'gweobs', # 0x9a
'gweos', # 0x9b
'gweoss', # 0x9c
'gweong', # 0x9d
'gweoj', # 0x9e
'gweoc', # 0x9f
'gweok', # 0xa0
'gweot', # 0xa1
'gweop', # 0xa2
'gweoh', # 0xa3
'gwe', # 0xa4
'gweg', # 0xa5
'gwegg', # 0xa6
'gwegs', # 0xa7
'gwen', # 0xa8
'gwenj', # 0xa9
'gwenh', # 0xaa
'gwed', # 0xab
'gwel', # 0xac
'gwelg', # 0xad
'gwelm', # 0xae
'gwelb', # 0xaf
'gwels', # 0xb0
'gwelt', # 0xb1
'gwelp', # 0xb2
'gwelh', # 0xb3
'gwem', # 0xb4
'gweb', # 0xb5
'gwebs', # 0xb6
'gwes', # 0xb7
'gwess', # 0xb8
'gweng', # 0xb9
'gwej', # 0xba
'gwec', # 0xbb
'gwek', # 0xbc
'gwet', # 0xbd
'gwep', # 0xbe
'gweh', # 0xbf
'gwi', # 0xc0
'gwig', # 0xc1
'gwigg', # 0xc2
'gwigs', # 0xc3
'gwin', # 0xc4
'gwinj', # 0xc5
'gwinh', # 0xc6
'gwid', # 0xc7
'gwil', # 0xc8
'gwilg', # 0xc9
'gwilm', # 0xca
'gwilb', # 0xcb
'gwils', # 0xcc
'gwilt', # 0xcd
'gwilp', # 0xce
'gwilh', # 0xcf
'gwim', # 0xd0
'gwib', # 0xd1
'gwibs', # 0xd2
'gwis', # 0xd3
'gwiss', # 0xd4
'gwing', # 0xd5
'gwij', # 0xd6
'gwic', # 0xd7
'gwik', # 0xd8
'gwit', # 0xd9
'gwip', # 0xda
'gwih', # 0xdb
'gyu', # 0xdc
'gyug', # 0xdd
'gyugg', # 0xde
'gyugs', # 0xdf
'gyun', # 0xe0
'gyunj', # 0xe1
'gyunh', # 0xe2
'gyud', # 0xe3
'gyul', # 0xe4
'gyulg', # 0xe5
'gyulm', # 0xe6
'gyulb', # 0xe7
'gyuls', # 0xe8
'gyult', # 0xe9
'gyulp', # 0xea
'gyulh', # 0xeb
'gyum', # 0xec
'gyub', # 0xed
'gyubs', # 0xee
'gyus', # 0xef
'gyuss', # 0xf0
'gyung', # 0xf1
'gyuj', # 0xf2
'gyuc', # 0xf3
'gyuk', # 0xf4
'gyut', # 0xf5
'gyup', # 0xf6
'gyuh', # 0xf7
'geu', # 0xf8
'geug', # 0xf9
'geugg', # 0xfa
'geugs', # 0xfb
'geun', # 0xfc
'geunj', # 0xfd
'geunh', # 0xfe
'geud', # 0xff
)
| gpl-3.0 |
bratsche/Neutron-Drive | google_appengine/lib/django_1_3/django/contrib/comments/models.py | 313 | 7636 | import datetime
from django.contrib.auth.models import User
from django.contrib.comments.managers import CommentManager
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.db import models
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
# Maximum length of a comment body; deployments can override it by defining
# COMMENT_MAX_LENGTH in their Django settings module.
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH',3000)
class BaseCommentAbstractModel(models.Model):
    """
    An abstract base class that any custom comment models probably should
    subclass.
    """
    # Content-object field
    # Generic foreign key to the object being commented on: content_type
    # identifies the model, object_pk holds its primary key as text (so any
    # PK type fits), and content_object ties the two together.
    content_type = models.ForeignKey(ContentType,
            verbose_name=_('content type'),
            related_name="content_type_set_for_%(class)s")
    object_pk = models.TextField(_('object ID'))
    content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
    # Metadata about the comment
    site = models.ForeignKey(Site)
    class Meta:
        abstract = True
    def get_content_object_url(self):
        """
        Get a URL suitable for redirecting to the content object.
        """
        # Resolved by the comments framework's redirect view, which looks the
        # target object up from (content_type_id, object_pk).
        return urlresolvers.reverse(
            "comments-url-redirect",
            args=(self.content_type_id, self.object_pk)
        )
class Comment(BaseCommentAbstractModel):
    """
    A user comment about some object.
    """
    # Who posted this comment? If ``user`` is set then it was an authenticated
    # user; otherwise at least user_name should have been set and the comment
    # was posted by a non-authenticated user.
    user = models.ForeignKey(User, verbose_name=_('user'),
                    blank=True, null=True, related_name="%(class)s_comments")
    user_name = models.CharField(_("user's name"), max_length=50, blank=True)
    user_email = models.EmailField(_("user's email address"), blank=True)
    user_url = models.URLField(_("user's URL"), blank=True)
    comment = models.TextField(_('comment'), max_length=COMMENT_MAX_LENGTH)
    # Metadata about the comment
    # submit_date defaults to None and is filled in by save() below.
    submit_date = models.DateTimeField(_('date/time submitted'), default=None)
    ip_address = models.IPAddressField(_('IP address'), blank=True, null=True)
    is_public = models.BooleanField(_('is public'), default=True,
                    help_text=_('Uncheck this box to make the comment effectively ' \
                                'disappear from the site.'))
    is_removed = models.BooleanField(_('is removed'), default=False,
                    help_text=_('Check this box if the comment is inappropriate. ' \
                                'A "This comment has been removed" message will ' \
                                'be displayed instead.'))
    # Manager
    objects = CommentManager()
    class Meta:
        db_table = "django_comments"
        ordering = ('submit_date',)
        permissions = [("can_moderate", "Can moderate comments")]
        verbose_name = _('comment')
        verbose_name_plural = _('comments')
    def __unicode__(self):
        # ``name`` resolves via the property below (user_name or the
        # authenticated user's name).
        return "%s: %s..." % (self.name, self.comment[:50])
    def save(self, *args, **kwargs):
        # Stamp the submission time on first save if the caller didn't set it.
        if self.submit_date is None:
            self.submit_date = datetime.datetime.now()
        super(Comment, self).save(*args, **kwargs)
    def _get_userinfo(self):
        """
        Get a dictionary that pulls together information about the poster
        safely for both authenticated and non-authenticated comments.
        This dict will have ``name``, ``email``, and ``url`` fields.
        """
        # Computed lazily and cached on the instance as ``_userinfo``.
        if not hasattr(self, "_userinfo"):
            self._userinfo = {
                "name" : self.user_name,
                "email" : self.user_email,
                "url" : self.user_url
            }
            if self.user_id:
                u = self.user
                if u.email:
                    self._userinfo["email"] = u.email
                # If the user has a full name, use that for the user name.
                # However, a given user_name overrides the raw user.username,
                # so only use that if this comment has no associated name.
                if u.get_full_name():
                    self._userinfo["name"] = self.user.get_full_name()
                elif not self.user_name:
                    self._userinfo["name"] = u.username
        return self._userinfo
    userinfo = property(_get_userinfo, doc=_get_userinfo.__doc__)
    def _get_name(self):
        return self.userinfo["name"]
    def _set_name(self, val):
        # For authenticated comments the name comes from the User record and
        # may not be overridden.
        if self.user_id:
            raise AttributeError(_("This comment was posted by an authenticated "\
                                   "user and thus the name is read-only."))
        self.user_name = val
    name = property(_get_name, _set_name, doc="The name of the user who posted this comment")
    def _get_email(self):
        return self.userinfo["email"]
    def _set_email(self, val):
        # Same read-only rule as for ``name`` above.
        if self.user_id:
            raise AttributeError(_("This comment was posted by an authenticated "\
                                   "user and thus the email is read-only."))
        self.user_email = val
    email = property(_get_email, _set_email, doc="The email of the user who posted this comment")
    def _get_url(self):
        return self.userinfo["url"]
    def _set_url(self, val):
        self.user_url = val
    url = property(_get_url, _set_url, doc="The URL given by the user who posted this comment")
    def get_absolute_url(self, anchor_pattern="#c%(id)s"):
        # Appends a per-comment anchor (formatted from this instance's
        # attributes) to the redirect URL of the commented object.
        return self.get_content_object_url() + (anchor_pattern % self.__dict__)
    def get_as_text(self):
        """
        Return this comment as plain text. Useful for emails.
        """
        d = {
            'user': self.user or self.name,
            'date': self.submit_date,
            'comment': self.comment,
            'domain': self.site.domain,
            'url': self.get_absolute_url()
        }
        return _('Posted by %(user)s at %(date)s\n\n%(comment)s\n\nhttp://%(domain)s%(url)s') % d
class CommentFlag(models.Model):
    """
    Records a flag on a comment. This is intentionally flexible; right now, a
    flag could be:
    * A "removal suggestion" -- where a user suggests a comment for (potential) removal.
    * A "moderator deletion" -- used when a moderator deletes a comment.
    You can (ab)use this model to add other flags, if needed. However, by
    design users are only allowed to flag a comment with a given flag once;
    if you want rating look elsewhere.
    """
    user = models.ForeignKey(User, verbose_name=_('user'), related_name="comment_flags")
    comment = models.ForeignKey(Comment, verbose_name=_('comment'), related_name="flags")
    # Free-form flag label; the three well-known values are defined below.
    flag = models.CharField(_('flag'), max_length=30, db_index=True)
    # Defaults to None and is stamped by save() below.
    flag_date = models.DateTimeField(_('date'), default=None)
    # Constants for flag types
    SUGGEST_REMOVAL = "removal suggestion"
    MODERATOR_DELETION = "moderator deletion"
    MODERATOR_APPROVAL = "moderator approval"
    class Meta:
        db_table = 'django_comment_flags'
        # One flag of a given type per user per comment.
        unique_together = [('user', 'comment', 'flag')]
        verbose_name = _('comment flag')
        verbose_name_plural = _('comment flags')
    def __unicode__(self):
        return "%s flag of comment ID %s by %s" % \
            (self.flag, self.comment_id, self.user.username)
    def save(self, *args, **kwargs):
        # Stamp the flag time on first save if the caller didn't set it.
        if self.flag_date is None:
            self.flag_date = datetime.datetime.now()
        super(CommentFlag, self).save(*args, **kwargs)
| bsd-3-clause |
NaN-git/bicreditsnew | qa/rpc-tests/listtransactions.py | 4 | 4726 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework import BitcreditTestFramework
from bitcreditrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.

    Every object whose fields all equal ``to_match`` must also satisfy all
    ``expected`` pairs; otherwise an AssertionError is raised. An
    AssertionError is also raised when no object matches ``to_match`` at all.
    """
    num_matched = 0
    for item in object_array:
        # Skip objects that do not match every to_match key/value pair.
        if any(item[key] != value for key, value in to_match.items()):
            continue
        # Each matched object must carry all the expected key/value pairs.
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s" % (str(to_match)))
class ListTransactionsTest(BitcreditTestFramework):
    """Exercise the listtransactions RPC across sends, mining and sendmany."""
    def run_test(self):
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        # Unconfirmed: sender sees a "send", receiver a "receive" entry.
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].setgenerate(True, 1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self:
        # A self-payment appears both as a "send" and a "receive" entry.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"send"},
                           {"amount":Decimal("-0.2")})
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"receive"},
                           {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        # Each output of the batched send must show up under the right
        # node, category, amount and (where applicable) account.
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.33")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.33")},
                           {"txid":txid, "account" : "from1"} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.44")},
                           {"txid":txid, "account" : ""} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.44")},
                           {"txid":txid, "account" : "toself"} )
if __name__ == '__main__':
ListTransactionsTest().main()
| mit |
crosswalk-project/chromium-crosswalk-efl | tools/perf/page_sets/presubmit_unittest.py | 43 | 5409 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import sys
import unittest
PERF_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(os.path.dirname(PERF_ROOT), 'telemetry'))
from telemetry.unittest import system_stub
sys.path.insert(0, PERF_ROOT)
from page_sets import PRESUBMIT
class AffectedFileStub(object):
  """Fake AffectedFile carrying a fixed path and change action."""

  def __init__(self, absolute_local_path, action):
    self.action = action
    self._absolute_local_path = absolute_local_path

  def AbsoluteLocalPath(self):
    """Return the path this stub was constructed with."""
    return self._absolute_local_path

  def Action(self):
    """Return the change action ('M', 'A' or 'D') for this file."""
    return self.action


class InputAPIStub(object):
  """Fake presubmit InputAPI over modified/deleted/added path lists."""

  def __init__(self, paths, deleted_paths=None, added_paths=None):
    self._paths = paths
    self._deleted_paths = deleted_paths or []
    self._added_paths = added_paths or []

  def AffectedFiles(self, include_deletes=True, file_filter=None):
    """Return AffectedFileStubs for each change, optionally filtered."""
    if not file_filter:
      file_filter = lambda _unused: True
    pending = [(path, 'M') for path in self._paths]
    pending += [(path, 'A') for path in self._added_paths]
    if include_deletes:
      pending += [(path, 'D') for path in self._deleted_paths]
    stubs = (AffectedFileStub(path, action) for path, action in pending)
    return [stub for stub in stubs if file_filter(stub)]

  def AbsoluteLocalPaths(self):
    """Return absolute paths of all affected files, deletions included."""
    result = []
    for affected in self.AffectedFiles():
      result.append(affected.AbsoluteLocalPath())
    return result

  def PresubmitLocalPath(self):
    """Return the path of the PRESUBMIT module under test."""
    return PRESUBMIT.__file__
class OutputAPIStub(object):
  """Fake presubmit OutputAPI: each result level is an exception type."""

  class PresubmitError(Exception):
    """Stands in for an error-level presubmit result."""

  class PresubmitNotifyResult(Exception):
    """Stands in for a notification-level presubmit result."""
PRESUBMIT.LoadSupport(InputAPIStub([])) # do this to support monkey patching
class PresubmitTest(unittest.TestCase):
  """Tests for the page_sets PRESUBMIT upload check, using stubbed-out
  cloud storage, filesystem and raw_input."""
  def setUp(self):
    # SHA-1 of the empty string; used for the files that should succeed.
    success_file_hash = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    self._stubs = system_stub.Override(
        PRESUBMIT, ['cloud_storage', 'os', 'raw_input'])
    self._stubs.raw_input.input = 'public'
    # Files in Cloud Storage.
    self._stubs.cloud_storage.remote_paths = [
        'skip'.zfill(40),
    ]
    # Local data files and their hashes.
    self._stubs.cloud_storage.local_file_hashes = {
        '/path/to/skip.wpr': 'skip'.zfill(40),
        '/path/to/success.wpr': success_file_hash,
        '/path/to/wrong_hash.wpr': success_file_hash,
    }
    # Local data files.
    self._stubs.os.path.files = (
        self._stubs.cloud_storage.local_file_hashes.keys())
    # Local hash files and their contents.
    self._stubs.cloud_storage.local_hash_files = {
        '/path/to/invalid_hash.wpr.sha1': 'invalid_hash',
        '/path/to/missing.wpr.sha1': 'missing'.zfill(40),
        '/path/to/success.wpr.sha1': success_file_hash,
        '/path/to/skip.wpr.sha1': 'skip'.zfill(40),
        '/path/to/wrong_hash.wpr.sha1': 'wronghash'.zfill(40),
    }
  def tearDown(self):
    self._stubs.Restore()
  def assertResultCount(self, results, expected_errors, expected_notifications):
    # Tally results by type; the stub OutputAPI models result levels as
    # distinct exception classes.
    counts = collections.defaultdict(int)
    for result in results:
      counts[type(result)] += 1
    actual_errors = counts[OutputAPIStub.PresubmitError]
    actual_notifications = counts[OutputAPIStub.PresubmitNotifyResult]
    self.assertEqual(expected_errors, actual_errors,
                     msg='Expected %d errors, but got %d. Results: %s' %
                     (expected_errors, actual_errors, results))
    self.assertEqual(expected_notifications, actual_notifications,
                     msg='Expected %d notifications, but got %d. Results: %s' %
                     (expected_notifications, actual_notifications, results))
  def _CheckUpload(self, paths, deleted_paths=None, added_paths=None):
    # Drive the real presubmit hook with a stubbed InputAPI/OutputAPI.
    input_api = InputAPIStub(paths, deleted_paths, added_paths)
    return PRESUBMIT.CheckChangeOnUpload(input_api, OutputAPIStub())
  def testIgnoreDeleted(self):
    results = self._CheckUpload([], ['/path/to/deleted.wpr.sha1'])
    self.assertResultCount(results, 0, 0)
  def testIgnoreNonHashes(self):
    results = self._CheckUpload(['/path/to/irrelevant.py'])
    self.assertResultCount(results, 0, 0)
  def testInvalidHash(self):
    results = self._CheckUpload(['/path/to/invalid_hash.wpr.sha1'])
    self.assertResultCount(results, 1, 0)
    self.assertTrue('valid SHA-1 hash' in str(results[0]), msg=results[0])
  def testMissingFile(self):
    results = self._CheckUpload(['/path/to/missing.wpr.sha1'])
    self.assertResultCount(results, 1, 0)
    self.assertTrue('not found' in str(results[0]), msg=results[0])
  def testSkip(self):
    # Already present in cloud storage, so nothing should be uploaded.
    results = self._CheckUpload(['/path/to/skip.wpr.sha1'])
    self.assertResultCount(results, 0, 0)
  def testSuccess(self):
    results = self._CheckUpload(['/path/to/success.wpr.sha1'])
    self.assertResultCount(results, 0, 1)
    self.assertTrue('Uploaded' in str(results[0]), msg=results[0])
  def testWrongHash(self):
    results = self._CheckUpload(['/path/to/wrong_hash.wpr.sha1'])
    self.assertTrue('does not match' in str(results[0]), msg=results[0])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
ict-felix/stack | expedient/src/python/expedient/common/federation/geni/util/cert_util.py | 2 | 2940 | #----------------------------------------------------------------------
# Copyright (c) 2010 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''
Certificate (GID in SFA terms) creation and verification utilities.
'''
from expedient.common.federation.sfa.trust.gid import GID
from expedient.common.federation.sfa.trust.certificate import Keypair
from expedient.common.federation.geni.util.urn_util import URN
def create_cert(urn, issuer_key=None, issuer_cert=None, intermediate=False):
    '''Build and sign a new certificate, returning (gid, keypair).

    The certificate's CN is the dotted ``authority.type.name`` form of the
    supplied URN (which must be valid). When both ``issuer_key`` and
    ``issuer_cert`` are given they sign the certificate; otherwise it is
    self-signed. If ``intermediate`` is true the certificate is marked as an
    intermediate CA (able to sign further certificates).
    '''
    # Raises ValueError if the supplied URN is not valid.
    parsed = URN(urn=urn)
    dotted = '%s.%s.%s' % (parsed.getAuthority(), parsed.getType(),
                           parsed.getName())
    # The subject is truncated to 64 characters (the X.509 CN upper bound).
    gid = GID(create=True, subject=dotted[:64],
              urn=urn)
    keypair = Keypair(create=True)
    gid.set_pubkey(keypair)
    if intermediate:
        # This cert will be able to sign other certificates.
        gid.set_intermediate_ca(intermediate)
    if issuer_key and issuer_cert:
        # The issuer pair may arrive as loaded objects or as filenames.
        if isinstance(issuer_key, str):
            issuer_key = Keypair(filename=issuer_key)
        if isinstance(issuer_cert, str):
            issuer_cert = GID(filename=issuer_cert)
        gid.set_issuer(issuer_key, cert=issuer_cert)
        gid.set_parent(issuer_cert)
    else:
        # No issuer given: self-sign with the freshly generated key.
        gid.set_issuer(keypair, subject=dotted)
    gid.encode()
    gid.sign()
    return gid, keypair
| apache-2.0 |
matousc89/padasip | padasip/filters/nlmf.py | 1 | 5444 | """
.. versionadded:: 1.1.0
The least-mean-fourth (LMF) adaptive filter implemented according to the
paper :cite:`zerguine2000convergence`. The NLMF is an extension of the LMF
adaptive filter (:ref:`filter-lmf`).
The NLMF filter can be created as follows
>>> import padasip as pa
>>> pa.filters.FilterNLMF(n)
where `n` is the size (number of taps) of the filter.
Content of this page:
.. contents::
:local:
:depth: 1
.. seealso:: :ref:`filters`
Algorithm Explanation
======================================
The NLMF is an extension of the LMF filter. See :ref:`filter-lmf`
for an explanation of the underlying algorithm.
The extension is based on normalization of learning rate.
The learning rate :math:`\mu` is replaced by the normalized learning rate :math:`\eta(k)`
normalized with every new sample according to input power as follows
:math:`\eta (k) = \\frac{\mu}{\epsilon + || \\textbf{x}(k) ||^2}`,
where :math:`|| \\textbf{x}(k) ||^2` is norm of input vector and
:math:`\epsilon` is a small positive constant (regularization term).
This constant is introduced to preserve the stability in cases where
the input is close to zero.
Minimal Working Examples
======================================
If you have measured data you may filter it as follows
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import padasip as pa
# creation of data
N = 500
x = np.random.normal(0, 1, (N, 4)) # input matrix
v = np.random.normal(0, 0.1, N) # noise
d = 2*x[:,0] + 0.1*x[:,1] - 0.3*x[:,2] + 0.5*x[:,3] + v # target
# identification
f = pa.filters.FilterNLMF(n=4, mu=0.005, w="random")
y, e, w = f.run(d, x)
# show results
plt.figure(figsize=(15,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
plt.plot(d,"b", label="d - target")
plt.plot(y,"g", label="y - output");plt.legend()
plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
plt.tight_layout()
plt.show()
References
======================================
.. bibliography:: lmf.bib
:style: plain
Code Explanation
======================================
"""
import numpy as np
from padasip.filters.base_filter import AdaptiveFilter
class FilterNLMF(AdaptiveFilter):
    """
    Adaptive NLMF (normalized least-mean-fourth) filter.

    **Args:**

    * `n` : length of filter (integer) - how many input is input array
      (row of input matrix)

    **Kwargs:**

    * `mu` : learning rate (float). Also known as step size. If it is too
      slow, the filter may have bad performance. If it is too high, the
      filter will be unstable.

    * `eps` : regularization term (float). It is introduced to preserve
      stability for close-to-zero input vectors.

    * `w` : initial weights of filter. Possible values are:

        * array with initial weights (1 dimensional array) of filter size

        * "random" : create random weights

        * "zeros" : create zero value weights
    """

    def __init__(self, n, mu=0.1, eps=1., w="random"):
        self.kind = "NLMF filter"
        if type(n) == int:
            self.n = n
        else:
            raise ValueError('The size of filter must be an integer')
        # check_float_param / init_weights come from the AdaptiveFilter base
        self.mu = self.check_float_param(mu, 0, 1000, "mu")
        self.eps = self.check_float_param(eps, 0, 1000, "eps")
        self.init_weights(w, self.n)
        self.w_history = False

    def adapt(self, d, x):
        """
        Adapt weights according to one desired value and its input.

        **Args:**

        * `d` : desired value (float)

        * `x` : input array (1-dimensional array)
        """
        y = np.dot(self.w, x)
        e = d - y
        # normalized step size: mu / (eps + ||x||^2)
        nu = self.mu / (self.eps + np.dot(x, x))
        self.w += nu * x * e**3

    def run(self, d, x):
        """
        This function filters multiple samples in a row.

        **Args:**

        * `d` : desired value (1 dimensional array)

        * `x` : input matrix (2-dimensional array). Rows are samples,
          columns are input arrays.

        **Returns:**

        * `y` : output value (1 dimensional array).
          The size corresponds with the desired value.

        * `e` : filter error for every sample (1 dimensional array).
          The size corresponds with the desired value.

        * `w` : history of all weights (2 dimensional array).
          Every row is set of the weights for given sample.

        **Raises:**

        * `ValueError` : if the lengths of `d` and `x` disagree, or the
          inputs cannot be converted to numpy arrays.
        """
        # measure the data and check that the dimensions agree
        N = len(x)
        if len(d) != N:
            raise ValueError('The length of vector d and matrix x must agree.')
        self.n = len(x[0])
        # prepare data (was a bare except; narrowed and cause chained)
        try:
            x = np.array(x)
            d = np.array(d)
        except Exception as err:
            raise ValueError('Impossible to convert x or d to a numpy array') from err
        # create empty arrays
        y = np.zeros(N)
        e = np.zeros(N)
        self.w_history = np.zeros((N, self.n))
        # adaptation loop
        for k in range(N):
            self.w_history[k, :] = self.w
            y[k] = np.dot(self.w, x[k])
            e[k] = d[k] - y[k]
            nu = self.mu / (self.eps + np.dot(x[k], x[k]))
            self.w += nu * x[k] * e[k]**3
        return y, e, self.w_history
| mit |
Bysmyyr/blink-crosswalk | Tools/Scripts/webkitpy/tool/main.py | 44 | 3144 | # Copyright (c) 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# A tool for automating dealing with bugzilla, posting patches, committing patches, etc.
from optparse import make_option
from webkitpy.common.host import Host
from webkitpy.tool.multicommandtool import MultiCommandTool
from webkitpy.tool import commands
class WebKitPatch(MultiCommandTool, Host):
    """Entry point object for the webkit-patch command-line tool.

    Combines the multi-command dispatcher (MultiCommandTool) with a Host,
    which provides access to the SCM, filesystem and other environment
    services used by the individual commands.
    """
    global_options = [
        make_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="enable all logging"),
        make_option("-d", "--directory", action="append", dest="patch_directories", default=[], help="Directory to look at for changed files"),
    ]
    def __init__(self, path):
        MultiCommandTool.__init__(self)
        Host.__init__(self)
        self._path = path
    def path(self):
        # Path to the tool itself; used by the help/usage machinery.
        return self._path
    def should_show_in_main_help(self, command):
        if not command.show_in_main_help:
            return False
        # Commands needing local commits are only advertised when the
        # current SCM actually supports them.
        return self.scm().supports_local_commits() if command.requires_local_commits else True
    # FIXME: This may be unnecessary since we pass global options to all commands during execute() as well.
    def handle_global_options(self, options):
        self.initialize_scm(options.patch_directories)
    def should_execute_command(self, command):
        scm = self.scm()
        if command.requires_local_commits and not scm.supports_local_commits():
            reason = "%s requires local commits using %s in %s." % (command.name, scm.display_name(), scm.checkout_root)
            return (False, reason)
        return (True, None)
| bsd-3-clause |
trondhindenes/ansible | lib/ansible/executor/task_queue_manager.py | 20 | 15648 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.stats import AggregateStats
from ansible.executor.task_result import TaskResult
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text, to_native
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import callback_loader, strategy_loader, module_loader
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.vars.hostvars import HostVars
from ansible.vars.reserved import warn_if_reserved
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskQueueManager']
class TaskQueueManager:
    '''
    This class handles the multiprocessing requirements of Ansible by
    creating a pool of worker forks, a result handler fork, and a
    manager object with shared datastructures/queues for coordinating
    work between all processes.
    The queue manager is responsible for loading the play strategy plugin,
    which dispatches the Play's tasks to hosts.
    '''
    # run() return codes; the failure values are bit flags so a strategy
    # can report several conditions at once.
    RUN_OK = 0
    RUN_ERROR = 1
    RUN_FAILED_HOSTS = 2
    RUN_UNREACHABLE_HOSTS = 4
    RUN_FAILED_BREAK_PLAY = 8
    RUN_UNKNOWN_ERROR = 255
    def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self._stats = AggregateStats()
        self.passwords = passwords
        self._stdout_callback = stdout_callback
        self._run_additional_callbacks = run_additional_callbacks
        self._run_tree = run_tree
        # callbacks are loaded lazily on first run() / load_callbacks()
        self._callbacks_loaded = False
        self._callback_plugins = []
        self._start_at_done = False
        # make sure any module paths (if specified) are added to the module_loader
        if options.module_path:
            for path in options.module_path:
                if path:
                    module_loader.add_directory(path)
        # a special flag to help us exit cleanly
        self._terminated = False
        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()
        self._listening_handlers = dict()
        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()
        try:
            self._final_q = multiprocessing.Queue()
        except OSError as e:
            raise AnsibleError("Unable to use multiprocessing, this is normally caused by lack of access to /dev/shm: %s" % to_native(e))
        # A temporary file (opened pre-fork) used by connection
        # plugins for inter-process locking.
        self._connection_lockfile = tempfile.TemporaryFile()
    def _initialize_processes(self, num):
        """Create `num` empty worker slots; workers are forked on demand."""
        self._workers = []
        for i in range(num):
            self._workers.append(None)
    def _initialize_notified_handlers(self, play):
        '''
        Clears and initializes the shared notified handlers dict with entries
        for each handler in the play, which is an empty array that will contain
        inventory hostnames for those hosts triggering the handler.
        '''
        # Zero the dictionary first by removing any entries there.
        # Proxied dicts don't support iteritems, so we have to use keys()
        self._notified_handlers.clear()
        self._listening_handlers.clear()
        def _process_block(b):
            # recursively flatten nested Blocks into a plain task list
            temp_list = []
            for t in b.block:
                if isinstance(t, Block):
                    temp_list.extend(_process_block(t))
                else:
                    temp_list.append(t)
            return temp_list
        handler_list = []
        for handler_block in play.handlers:
            handler_list.extend(_process_block(handler_block))
        # then initialize it with the given handler list
        self.update_handler_list(handler_list)
    def update_handler_list(self, handler_list):
        """Register each handler (keyed by its uuid) and any 'listen' topics
        it subscribes to, so notifications can be routed to it later."""
        for handler in handler_list:
            if handler._uuid not in self._notified_handlers:
                display.debug("Adding handler %s to notified list" % handler.name)
                self._notified_handlers[handler._uuid] = []
            if handler.listen:
                # 'listen' may be a single topic or a list of topics
                listeners = handler.listen
                if not isinstance(listeners, list):
                    listeners = [listeners]
                for listener in listeners:
                    if listener not in self._listening_handlers:
                        self._listening_handlers[listener] = []
                    display.debug("Adding handler %s to listening list" % handler.name)
                    self._listening_handlers[listener].append(handler._uuid)
    def load_callbacks(self):
        '''
        Loads all available callbacks, with the exception of those which
        utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
        only one such callback plugin will be loaded.
        '''
        if self._callbacks_loaded:
            return
        stdout_callback_loaded = False
        if self._stdout_callback is None:
            self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
        if isinstance(self._stdout_callback, CallbackBase):
            # caller supplied an already-instantiated callback object
            stdout_callback_loaded = True
        elif isinstance(self._stdout_callback, string_types):
            if self._stdout_callback not in callback_loader:
                raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
            else:
                self._stdout_callback = callback_loader.get(self._stdout_callback)
            try:
                self._stdout_callback.set_options()
            except AttributeError:
                display.deprecated("%s stdout callback, does not support setting 'options', it will work for now, "
                                   " but this will be required in the future and should be updated,"
                                   " see the 2.4 porting guide for details." % self._stdout_callback._load_name, version="2.9")
            stdout_callback_loaded = True
        else:
            raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
        for callback_plugin in callback_loader.all(class_only=True):
            callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
            callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
            (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
            if callback_type == 'stdout':
                # we only allow one callback of type 'stdout' to be loaded,
                if callback_name != self._stdout_callback or stdout_callback_loaded:
                    continue
                stdout_callback_loaded = True
            elif callback_name == 'tree' and self._run_tree:
                # special case for ansible cli option
                pass
            elif not self._run_additional_callbacks or (callback_needs_whitelist and (
                    C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
                # 2.x plugins shipped with ansible should require whitelisting, older or non shipped should load automatically
                continue
            callback_obj = callback_plugin()
            try:
                callback_obj.set_options()
            except AttributeError:
                display.deprecated("%s callback, does not support setting 'options', it will work for now, "
                                   " but this will be required in the future and should be updated, "
                                   " see the 2.4 porting guide for details." % callback_obj._load_name, version="2.9")
            self._callback_plugins.append(callback_obj)
        self._callbacks_loaded = True
    def run(self, play):
        '''
        Iterates over the roles/tasks in a play, using the given (or default)
        strategy for queueing tasks. The default is the linear strategy, which
        operates like classic Ansible by keeping all hosts in lock-step with
        a given task (meaning no hosts move on to the next task until all hosts
        are done with the current task).
        '''
        if not self._callbacks_loaded:
            self.load_callbacks()
        all_vars = self._variable_manager.get_vars(play=play)
        warn_if_reserved(all_vars)
        templar = Templar(loader=self._loader, variables=all_vars)
        # work on a copy of the play so templating does not mutate the original
        new_play = play.copy()
        new_play.post_validate(templar)
        new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers
        self.hostvars = HostVars(
            inventory=self._inventory,
            variable_manager=self._variable_manager,
            loader=self._loader,
        )
        play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
        for callback_plugin in self._callback_plugins:
            if hasattr(callback_plugin, 'set_play_context'):
                callback_plugin.set_play_context(play_context)
        self.send_callback('v2_playbook_on_play_start', new_play)
        # initialize the shared dictionary containing the notified handlers
        self._initialize_notified_handlers(new_play)
        # build the iterator
        iterator = PlayIterator(
            inventory=self._inventory,
            play=new_play,
            play_context=play_context,
            variable_manager=self._variable_manager,
            all_vars=all_vars,
            start_at_done=self._start_at_done,
        )
        # adjust to # of workers to configured forks or size of batch, whatever is lower
        self._initialize_processes(min(self._options.forks, iterator.batch_size))
        # load the specified strategy (or the default linear one)
        strategy = strategy_loader.get(new_play.strategy, self)
        if strategy is None:
            raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
        # Because the TQM may survive multiple play runs, we start by marking
        # any hosts as failed in the iterator here which may have been marked
        # as failed in previous runs. Then we clear the internal list of failed
        # hosts so we know what failed this round.
        for host_name in self._failed_hosts.keys():
            host = self._inventory.get_host(host_name)
            iterator.mark_host_failed(host)
        self.clear_failed_hosts()
        # during initialization, the PlayContext will clear the start_at_task
        # field to signal that a matching task was found, so check that here
        # and remember it so we don't try to skip tasks on future plays
        if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
            self._start_at_done = True
        # and run the play using the strategy and cleanup on way out
        play_return = strategy.run(iterator, play_context)
        # now re-save the hosts that failed from the iterator to our internal list
        for host_name in iterator.get_failed_hosts():
            self._failed_hosts[host_name] = True
        strategy.cleanup()
        self._cleanup_processes()
        return play_return
    def cleanup(self):
        """Terminate the run, close the result queue and reap workers."""
        display.debug("RUNNING CLEANUP")
        self.terminate()
        self._final_q.close()
        self._cleanup_processes()
    def _cleanup_processes(self):
        """Terminate any worker processes that are still alive."""
        if hasattr(self, '_workers'):
            for worker_prc in self._workers:
                if worker_prc and worker_prc.is_alive():
                    try:
                        worker_prc.terminate()
                    except AttributeError:
                        pass
    def clear_failed_hosts(self):
        """Reset the per-run record of failed hosts."""
        self._failed_hosts = dict()
    def get_inventory(self):
        return self._inventory
    def get_variable_manager(self):
        return self._variable_manager
    def get_loader(self):
        return self._loader
    def get_workers(self):
        # return a shallow copy so callers can't mutate the worker list
        return self._workers[:]
    def terminate(self):
        """Set the termination flag checked by the strategy loop."""
        self._terminated = True
    def has_dead_workers(self):
        """Return True if any worker exited with SIGKILL, SIGSEGV or SIGTERM."""
        # [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
        # <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>
        defunct = False
        for (idx, x) in enumerate(self._workers):
            if hasattr(x, 'exitcode'):
                if x.exitcode in [-9, -11, -15]:
                    defunct = True
        return defunct
    def send_callback(self, method_name, *args, **kwargs):
        """Invoke `method_name` (or its v1/v2_on_any fallbacks) on every
        enabled callback plugin, passing cleaned copies of the arguments."""
        for callback_plugin in [self._stdout_callback] + self._callback_plugins:
            # a plugin that set self.disabled to True will not be called
            # see osx_say.py example for such a plugin
            if getattr(callback_plugin, 'disabled', False):
                continue
            # try to find v2 method, fallback to v1 method, ignore callback if no method found
            methods = []
            for possible in [method_name, 'v2_on_any']:
                gotit = getattr(callback_plugin, possible, None)
                if gotit is None:
                    gotit = getattr(callback_plugin, possible.replace('v2_', ''), None)
                if gotit is not None:
                    methods.append(gotit)
            # send clean copies
            new_args = []
            for arg in args:
                # FIXME: add play/task cleaners
                if isinstance(arg, TaskResult):
                    new_args.append(arg.clean_copy())
                # elif isinstance(arg, Play):
                # elif isinstance(arg, Task):
                else:
                    new_args.append(arg)
            for method in methods:
                try:
                    method(*new_args, **kwargs)
                except Exception as e:
                    # TODO: add config toggle to make this fatal or not?
                    display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_text(method_name), to_text(callback_plugin), to_text(e)))
                    from traceback import format_tb
                    from sys import exc_info
                    display.vvv('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
| gpl-3.0 |
tachijuan/python | myscripts/imap.py | 1 | 1470 | import os, sys, imaplib, rfc822, re, StringIO
# Python 2 script: poll an IMAP mailbox for unseen mails whose subject is
# "PIFI MESSAGE" and switch two Raspberry Pi LEDs according to RED/GREEN
# ON/OFF commands found in the message body.
# NOTE(review): credentials are hard-coded below -- move them to a config
# file or environment variables before sharing this script.
import RPi.GPIO as GPIO
import time
server ='mail.xxx.us'
username='juan@xxx.us'
password='xxx'
GPIO.setmode(GPIO.BOARD)  # physical board pin numbering
GREEN_LED = 22
RED_LED = 7
GPIO.setup(GREEN_LED, GPIO.OUT)
GPIO.setup(RED_LED, GPIO.OUT)
M = imaplib.IMAP4_SSL(server)
M.login(username, password)
M.select()  # default mailbox: INBOX
try:
    while 1:
        print "checking email"
        # only messages not yet seen, so each command is applied once
        typ, data = M.search(None, '(UNSEEN SUBJECT "PIFI MESSAGE")')
        for num in data[0].split():
            # fetching marks the message as seen; `data` is rebound here,
            # but the loop iterates over the already-built split() list
            typ, data = M.fetch(num, '(RFC822)')
            #print 'Message %s\n%s\n' % (num, data[0][1])
            redon = re.search( "RED ON",
                               data[0][1],
                               re.MULTILINE|re.DOTALL )
            greenon = re.search( "GREEN ON",
                                 data[0][1],
                                 re.MULTILINE|re.DOTALL )
            redoff = re.search( "RED OFF",
                                data[0][1],
                                re.MULTILINE|re.DOTALL )
            greenoff = re.search( "GREEN OFF",
                                  data[0][1],
                                  re.MULTILINE|re.DOTALL )
            if redon:
                GPIO.output(RED_LED, True)
                print "red on"
            if greenon:
                GPIO.output(GREEN_LED, True)
                print "green on"
            if redoff:
                GPIO.output(RED_LED, False)
                print "red off"
            if greenoff:
                GPIO.output(GREEN_LED, False)
                print "green off"
        time.sleep(120)  # poll every two minutes
except KeyboardInterrupt:
    # Ctrl-C: release the GPIO pins before exiting
    GPIO.cleanup()
    pass
M.close()
M.logout()
| mit |
codepython/CollectorCity-Market-Place | stores/apps/blog_pages/tests.py | 2 | 8560 | """
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
import datetime
import logging
import time
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from blog_pages.models import *
class PagesTest(TestCase):
    """Integration tests for the blog_pages app (posts, static pages,
    navigation links) exercised through the shop admin views over the
    shop's own DNS host name."""
    fixtures = [
        'greatcoins_market.json',
        'greatcoins_subscriptions.json',
        'greatcoins_auth.json',
        'greatcoins_shops.json',
        'greatcoins_preferences.json',
        'greatcoins_themes.json'
    ]
    def setUp(self):
        # Build the minimal page set for the first fixture shop: an About
        # page, the default menu, a Home page and one extra static page.
        shop = Shop.objects.all()[0]
        about = About(shop=shop)
        about.save()
        Menu.create_default(shop)
        home = Home(shop=shop)
        home.save()
        page = Page(shop=shop, name="Just a Page", name_link="somewhere", title="This is a page", body="some content here")
        page.save()
        self.shop = shop
        # all requests must carry the shop's host so middleware routes them
        self.HTTP_HOST = self.shop.default_dns
    def test_posts(self):
        """
        Blog post CRUD: list view, creation, edition and deletion.
        """
        user = self.shop.admin
        success = self.client.login(username=user.username, password="test")
        self.assertEqual(success, True, "Login failed")
        #self.assertEqual(response.status_code, 200)
        #self.assertContains(response, "My Unique Item", count=None, status_code=200, msg_prefix='')
        response = self.client.get(reverse("blog_pages"), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        # Test add new post
        response = self.client.get(reverse("post_add"), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        posts = Post.objects.filter(shop=self.shop)
        l = len(posts)
        response = self.client.post(reverse("post_add"),{'title': 'Post Title', 'body': "Post body"}, HTTP_HOST=self.HTTP_HOST, follow=True)
        # Check that redirects to blog list
        self.assertContains(response, "Post successfully saved.", count=None, status_code=200, msg_prefix='')
        # Check that there is one more post
        posts = Post.objects.filter(shop=self.shop)
        self.assertEqual(posts.count(), l + 1)
        # Test post edition
        post = Post(shop=self.shop, title="Orignal title", body="original body")
        post.save()
        post_id = post.id
        response = self.client.post(reverse("post_edit", args=[post_id]), {'title': 'New Title', 'body': "New body"}, HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertContains(response, "Post successfully saved.", count=None, status_code=200, msg_prefix='')
        # Check that post was really edited
        edited_post = Post.objects.filter(id=post_id)[0]
        self.assertEqual(edited_post.title, "New Title")
        self.assertEqual(edited_post.body, "New body")
        # Test post deletion
        self.assertEqual(len(Post.objects.filter(id=post_id)), 1)
        response = self.client.get(reverse("post_delete", args=[post_id]), HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertEqual(response.status_code, 200)
        # Check that post was deleted
        self.assertEqual(len(Post.objects.filter(id=post_id)), 0)
    def test_pages(self):
        """Static page CRUD: home page, about page, creation, edition
        and deletion of a custom page."""
        user = self.shop.admin
        success = self.client.login(username=user.username, password="test")
        self.assertEqual(success, True, "Login failed")
        # Home page
        response = self.client.get(reverse("page_edit_home"), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse("page_edit_home"), {'title': 'Welcome to this shop. I just have to change this title', 'body': 'this body chages too!'}, HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertEqual(response.status_code, 200)
        home = Home.objects.filter(shop=self.shop)[0]
        self.assertEqual(home.title,'Welcome to this shop. I just have to change this title')
        # About us page
        response = self.client.get(reverse("page_edit_about"), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse("page_edit_about"), {'title': 'Some about us text', 'body': 'this body chages too!'}, HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertContains(response, "Page successfully saved.", count=None, status_code=200, msg_prefix='')
        about = About.objects.filter(shop=self.shop)[0]
        self.assertEqual(about.title,'Some about us text')
        # New Page
        response = self.client.get(reverse("page_create"), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        total_pages = Page.objects.all().count()
        response = self.client.post(reverse("page_create"), {'title': 'A page title', 'name': 'some name', 'name_link': 'some-name-link', 'body': 'this is the page body!'}, HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertContains(response, "Page successfully saved.", count=None, status_code=200, msg_prefix='')
        new_total_pages = Page.objects.all().count()
        self.assertEqual(new_total_pages, total_pages + 1)
        # Edit page
        page = Page.objects.filter(shop=self.shop)[0]
        page_id = page.id
        response = self.client.get(reverse("page_edit", args=[page_id]), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse("page_edit", args=[page_id]), {'title': 'This is the new title', 'name': 'some new name', 'name_link': 'some-new-link', 'body': 'this is the new body'}, HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertContains(response, "Page successfully edited.", count=None, status_code=200, msg_prefix='')
        edited_page = Page.objects.filter(id=page_id)[0]
        self.assertEqual(edited_page.title, "This is the new title")
        self.assertEqual(edited_page.name, "some new name")
        self.assertEqual(edited_page.name_link, "some-new-link")
        self.assertEqual(edited_page.body, "this is the new body")
        # Delete page
        response = self.client.get(reverse("page_delete", args=[page_id]), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(len(Page.objects.filter(id=page_id)), 0)
    def test_links(self):
        """Navigation link CRUD: listing, creation, edition and deletion."""
        user = self.shop.admin
        success = self.client.login(username=user.username, password="test")
        self.assertEqual(success, True, "Login failed")
        # Links
        response = self.client.get(reverse("navigation"), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        menu = self.shop.menu_set.all()[0]
        # Add link
        response = self.client.get(reverse("link_add", args=[menu.id]), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        a_links = Link.objects.all().count()
        response = self.client.post(reverse("link_add", args=[menu.id]), {'name': 'Link name' , 'to': '/home/' , 'title': 'link title'} , HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertContains(response, "Link successfully saved", count=None, status_code=200, msg_prefix='')
        b_links = Link.objects.all().count()
        self.assertEquals(b_links, a_links + 1)
        link = Link.objects.all()[0]
        # Edit link
        response = self.client.get(reverse("link_edit", args=[link.id]), HTTP_HOST=self.HTTP_HOST)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse("link_edit", args=[link.id]), {'name': 'Link name' , 'to': '/home/' , 'title': 'link title'} , HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertContains(response, "Link successfully edited", count=None, status_code=200, msg_prefix='')
        # Delete link
        response = self.client.get(reverse("link_delete", args=[link.id]), HTTP_HOST=self.HTTP_HOST, follow=True)
        self.assertContains(response, "Link successfully deleted", count=None, status_code=200, msg_prefix='')
        # response = self.client.get(reverse("link_order"), HTTP_HOST=self.HTTP_HOST)
        # self.assertEqual(response.status_code, 200)
| apache-2.0 |
JeremyRand/namecore | contrib/spendfrom/spendfrom.py | 680 | 10053 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    # round-trip through float/JSON and convert back to satoshis
    round_tripped = int(json.loads(json.dumps(float(value))) * 1.0e8)
    if round_tripped != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # anything else (Linux, BSD, ...) uses the dot-directory in $HOME
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser  # Python 2 module name
    class FakeSecHead(object):
        # ConfigParser requires at least one [section] header, but
        # bitcoin.conf has none: this file-like wrapper injects a fake
        # "[all]" section on the first readline() and strips trailing
        # "#" comments from every subsequent line.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # emit the fake header exactly once
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server.

    Reads rpcuser/rpcpassword/rpcport/testnet from `config`, verifies the
    server's testnet setting matches, and returns a ServiceProxy.
    Exits the process with status 1 on any failure.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 18332 if testnet else 8332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except SystemExit:
        # The original bare `except:` swallowed the SystemExit raised by
        # the testnet-mismatch sys.exit(1) above and printed a misleading
        # "Error connecting" message; let it propagate instead.
        raise
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for the wallet passphrase (if needed) and unlock it for 5 seconds.

    Returns True if the wallet is unencrypted or successfully unlocked,
    False if the passphrase was wrong (or unlocking otherwise failed).
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # Was a bare `except:`, which also caught KeyboardInterrupt
            # (Ctrl-C at the prompt) and mislabeled it "Wrong passphrase";
            # narrowed so interrupts propagate normally.
            sys.stderr.write("Wrong passphrase\n")
    # re-query: unlocked_until moves into the future on success
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Return a dict keyed by address with the spendable funds on it:
    {address: {"total": Decimal, "outputs": [listunspent entries],
    "account": account name or ""}}. Includes unconfirmed outputs
    (minconf=0)."""
    address_summary = dict()
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent outputs until *needed* is covered.

    Returns (selected, change): `selected` is a list of
    {"txid": ..., "vout": ...} outpoints, and `change` is the gathered
    amount minus `needed` (negative if the inputs could not cover it).
    """
    picked = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        picked.append({"txid": candidate["txid"], "vout": candidate["vout"]})
        gathered += candidate["amount"]
    return (picked, gathered - needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending from the given addresses.

    :param fromaddresses: list of source addresses; the last one receives
        any change
    :param toaddress: destination address
    :param amount: Decimal amount to send
    :param fee: Decimal fee to include
    :return: signed transaction data as a hex string; exits on failure.
    """
    all_coins = list_available(bitcoind)

    needed = amount + fee
    total_available = Decimal("0.0")
    candidate_outputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        candidate_outputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n" % (total_available, needed))
        sys.exit(1)

    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal
    # numbers.  Instead of wrestling with getting json.dumps() (used by
    # jsonrpc) to encode Decimals, amounts are cast to float before being
    # sent to bitcoind.
    outputs = {toaddress: float(amount)}
    (inputs, change_amount) = select_coins(needed, candidate_outputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        outputs[change_address] = outputs.get(change_address, 0.0) + float(change_amount)

    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    return signed_rawtx["hex"]
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of the previous outputs spent by a transaction's inputs.

    :param txinfo: decoded transaction dict with a 'vin' list
    :return: Decimal total of the spent outputs' values
    """
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # Look up the transaction each input spends to find its value.
        prev_tx = bitcoind.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Return the Decimal sum of all output values of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Decode a signed transaction and sanity-check its implied fee.

    Exits the process if the fee exceeds max_fee, or if a no-fee
    transaction is larger than 1000 bytes or moves a tiny amount.

    :param bitcoind: JSON-RPC proxy to the bitcoin daemon
    :param txdata_hex: signed raw transaction, hex-encoded
    :param max_fee: maximum acceptable fee (in BTC)
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # Bug fix: the no-fee checks below previously read an undefined
        # name `fee` (NameError at runtime); the actual fee is the
        # difference between inputs and outputs.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)/2  # two hex characters per byte
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list spendable balances, or build, check
    and (optionally) broadcast a transaction.
    """
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just print a per-address summary of spendable coins.
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():  # Python 2 dict API
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Reject implied fees above 1% of the amount being sent.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
# Script entry point.
if __name__ == '__main__':
    main()
| mit |
LevinJ/Supply-demand-forecasting | implement/xgboostmodel.py | 1 | 4070 | import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from preprocess.preparedata import PrepareData
import numpy as np
from utility.runtype import RunType
from utility.datafilepath import g_singletonDataFilePath
from preprocess.splittrainvalidation import HoldoutSplitMethod
import xgboost as xgb
from evaluation.sklearnmape import mean_absolute_percentage_error_xgboost
from evaluation.sklearnmape import mean_absolute_percentage_error
from utility.modelframework import ModelFramework
from utility.xgbbasemodel import XGBoostGridSearch
from evaluation.sklearnmape import mean_absolute_percentage_error_xgboost_cv
from utility.xgbbasemodel import XGBoostBase
import logging
import sys
class DidiXGBoostModel(XGBoostBase, PrepareData, XGBoostGridSearch):
    """XGBoost regression model for the Didi supply/demand gap forecast.

    Combines the data pipeline (PrepareData), hyper-parameter grid search
    (XGBoostGridSearch) and the shared train/evaluate driver (XGBoostBase).
    """
    def __init__(self):
        PrepareData.__init__(self)
        XGBoostGridSearch.__init__(self)
        XGBoostBase.__init__(self)
        # Column of the CV result table used to select the best parameters.
        self.best_score_colname_in_cv = 'test-mape-mean'
        # Acts as a three-way switch: True = cross validation, False =
        # single train/validation split, None = hyper-parameter fine tuning.
        self.do_cross_val = False
        self.train_validation_foldid = -2
        if self.do_cross_val is None:
            # Fine-tuning mode: mirror all log output to stdout and a file.
            root = logging.getLogger()
            root.setLevel(logging.DEBUG)
            root.addHandler(logging.StreamHandler(sys.stdout))
            root.addHandler(logging.FileHandler('logs/finetune_parameters.log', mode='w'))
        return
    def set_xgb_parameters(self):
        """Configure booster parameters and the learning/callback settings."""
        early_stopping_rounds = 3
        # Bug fix: the dict literal previously spelled 'silent' twice;
        # duplicate keys are silently collapsed, so one copy is removed.
        # NOTE(review): the 'lambda ' key carries a trailing space (also in
        # get_paramgrid_1) -- confirm whether xgboost's L2 regularization
        # parameter is actually meant to be picked up here.
        self.xgb_params = {'silent': 1, 'colsample_bytree': 0.8, 'lambda ': 1,
                           'min_child_weight': 1, 'subsample': 0.8, 'eta': 0.01,
                           'objective': 'reg:linear', 'max_depth': 7}
        self.xgb_learning_params = {
            'num_boost_round': 200,
            'callbacks': [xgb.callback.print_evaluation(show_stdv=True),
                          xgb.callback.early_stop(early_stopping_rounds)],
            'feval': mean_absolute_percentage_error_xgboost_cv}
        if self.do_cross_val == False:
            # Hold-out evaluation uses the non-CV flavour of the MAPE metric.
            self.xgb_learning_params['feval'] = mean_absolute_percentage_error_xgboost
        return
    def get_paramgrid_1(self):
        """
        This method must be overriden by derived class when its objective is not reg:linear
        """
        param_grid = {'max_depth': [6], 'eta': [0.1], 'min_child_weight': [1], 'silent': [1],
                      'objective': ['reg:linear'], 'colsample_bytree': [0.8],
                      'subsample': [0.8], 'lambda ': [1]}
        return param_grid
    def get_paramgrid_2(self, param_grid):
        """
        This method must be overriden by derived class if it intends to fine tune parameters
        """
        self.ramdonized_search_enable = False
        self.randomized_search_n_iter = 150
        self.grid_search_display_result = True
        # Observed results are recorded next to each setting for reference.
        param_grid['eta'] = [0.01] #train-mape:-0.448062+0.00334926 test-mape:-0.448402+0.00601761
        param_grid['max_depth'] = range(5,8) #train-mape:-0.363007+0.00454276 test-mape:-0.452832+0.00321641
        param_grid['colsample_bytree'] = [0.6,0.8,1.0]
        # Earlier experiments, kept for reference:
        # param_grid['lambda'] = range(1,15)
        # param_grid['max_depth'] = [3,4]
        # param_grid['eta'] = [0.01,0.1] # 0.459426+0.00518875
        # param_grid['subsample'] = [0.5] #0.458935+0.00522205
        # param_grid['eta'] = [0.005] #0.457677+0.00526401
        return param_grid
    def get_learning_params(self):
        """
        This method must be overriden by derived class if it intends to fine tune parameters
        """
        num_boost_round = 100
        early_stopping_rounds = 5
        kwargs = {'num_boost_round': num_boost_round,
                  'feval': mean_absolute_percentage_error_xgboost_cv,
                  'callbacks': [xgb.callback.print_evaluation(show_stdv=True),
                                xgb.callback.early_stop(early_stopping_rounds)]}
        return kwargs
# Script entry point: build the model and run the configured mode.
if __name__ == "__main__":
    obj= DidiXGBoostModel()
    obj.run()
MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/pygments/token.py | 19 | 6067 | # -*- coding: utf-8 -*-
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
    """A token type: a tuple of dotted-name parts, created lazily and
    cached so that every dotted name is a singleton object."""
    parent = None

    def split(self):
        """Return the chain of token types from the root down to self."""
        chain = []
        node = self
        while node is not None:
            chain.append(node)
            node = node.parent
        chain.reverse()
        return chain

    def __init__(self, *args):
        # tuple fills in the contents via __new__; just set up the registry
        self.subtypes = set()

    def __contains__(self, val):
        # A token type contains itself and every type derived from it.
        if self is val:
            return True
        return type(val) is self.__class__ and val[:len(self)] == self

    def __getattr__(self, val):
        # Subtypes are created on first attribute access (capitalized names
        # only) and cached via setattr, making each one a singleton.
        if not val or not val[0].isupper():
            return tuple.__getattribute__(self, val)
        child = _TokenType(self + (val,))
        setattr(self, val, child)
        self.subtypes.add(child)
        child.parent = self
        return child

    def __repr__(self):
        return 'Token' + (self and '.' or '') + '.'.join(self)

    def __copy__(self):
        # These instances are supposed to be singletons
        return self

    def __deepcopy__(self, memo):
        # These instances are supposed to be singletons
        return self
# The root of the token-type hierarchy; all token types hang off it.
Token = _TokenType()

# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other

# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment

# Generic types for non-source code
Generic = Token.Generic

# String and some others are not direct childs of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
    """
    Return True if ``ttype`` is a subtype of ``other``.

    exists for backwards compatibility. use ``ttype in other`` now.
    """
    # Delegates to _TokenType.__contains__.
    return ttype in other
def string_to_tokentype(s):
    """
    Convert a string into a token type::

        >>> string_to_tokentype('String.Double')
        Token.Literal.String.Double
        >>> string_to_tokentype('Token.Literal.Number')
        Token.Literal.Number
        >>> string_to_tokentype('')
        Token

    Tokens that are already tokens are returned unchanged:

        >>> string_to_tokentype(String)
        Token.Literal.String
    """
    if isinstance(s, _TokenType):
        return s
    if not s:
        return Token
    node = Token
    # Walk attribute by attribute; _TokenType.__getattr__ creates and
    # caches any missing subtype along the way.
    for item in s.split('.'):
        node = getattr(node, item)
    return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
# Each short name must be unique across the whole table.
STANDARD_TYPES = {
    Token: '',

    Text: '',
    Whitespace: 'w',
    Escape: 'esc',
    Error: 'err',
    Other: 'x',

    Keyword: 'k',
    Keyword.Constant: 'kc',
    Keyword.Declaration: 'kd',
    Keyword.Namespace: 'kn',
    Keyword.Pseudo: 'kp',
    Keyword.Reserved: 'kr',
    Keyword.Type: 'kt',

    Name: 'n',
    Name.Attribute: 'na',
    Name.Builtin: 'nb',
    Name.Builtin.Pseudo: 'bp',
    Name.Class: 'nc',
    Name.Constant: 'no',
    Name.Decorator: 'nd',
    Name.Entity: 'ni',
    Name.Exception: 'ne',
    Name.Function: 'nf',
    Name.Property: 'py',
    Name.Label: 'nl',
    Name.Namespace: 'nn',
    Name.Other: 'nx',
    Name.Tag: 'nt',
    Name.Variable: 'nv',
    Name.Variable.Class: 'vc',
    Name.Variable.Global: 'vg',
    Name.Variable.Instance: 'vi',

    Literal: 'l',
    Literal.Date: 'ld',

    String: 's',
    String.Backtick: 'sb',
    String.Char: 'sc',
    String.Doc: 'sd',
    String.Double: 's2',
    String.Escape: 'se',
    String.Heredoc: 'sh',
    String.Interpol: 'si',
    String.Other: 'sx',
    String.Regex: 'sr',
    String.Single: 's1',
    String.Symbol: 'ss',

    Number: 'm',
    Number.Bin: 'mb',
    Number.Float: 'mf',
    Number.Hex: 'mh',
    Number.Integer: 'mi',
    Number.Integer.Long: 'il',
    Number.Oct: 'mo',

    Operator: 'o',
    Operator.Word: 'ow',

    Punctuation: 'p',

    Comment: 'c',
    Comment.Hashbang: 'ch',
    Comment.Multiline: 'cm',
    Comment.Preproc: 'cp',
    Comment.PreprocFile: 'cpf',
    Comment.Single: 'c1',
    Comment.Special: 'cs',

    Generic: 'g',
    Generic.Deleted: 'gd',
    Generic.Emph: 'ge',
    Generic.Error: 'gr',
    Generic.Heading: 'gh',
    Generic.Inserted: 'gi',
    Generic.Output: 'go',
    Generic.Prompt: 'gp',
    Generic.Strong: 'gs',
    Generic.Subheading: 'gu',
    Generic.Traceback: 'gt',
}
| mit |
benoitsteiner/tensorflow-opencl | tensorflow/python/kernel_tests/logging_ops_test.py | 30 | 2954 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.logging_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class LoggingOpsTest(test.TestCase):
  """Tests for control_flow_ops.Assert used as a control dependency."""

  def testAssertDivideByZero(self):
    """A passing Assert lets the guarded op run; a failing one raises."""
    with self.test_session() as sess:
      epsilon = ops.convert_to_tensor(1e-20)
      x = ops.convert_to_tensor(0.0)
      y = ops.convert_to_tensor(1.0)
      z = ops.convert_to_tensor(2.0)
      # assert(epsilon < y)
      # z / y
      with sess.graph.control_dependencies([
          control_flow_ops.Assert(
              math_ops.less(epsilon, y), ["Divide-by-zero"])
      ]):
        out = math_ops.div(z, y)
      self.assertAllEqual(2.0, out.eval())
      # assert(epsilon < x)
      # z / x
      #
      # This tests printing out multiple tensors
      with sess.graph.control_dependencies([
          control_flow_ops.Assert(
              math_ops.less(epsilon, x), ["Divide-by-zero", "less than x"])
      ]):
        out = math_ops.div(z, x)
        with self.assertRaisesOpError("less than x"):
          out.eval()
class PrintGradientTest(test.TestCase):
  """Tests that logging_ops.Print preserves shapes and gradients."""

  @test_util.run_in_graph_and_eager_modes()
  def testPrintShape(self):
    """Print is an identity op: output shape matches input shape."""
    inp = constant_op.constant(2.0, shape=[100, 32])
    inp_printed = logging_ops.Print(inp, [inp])
    self.assertEqual(inp.get_shape(), inp_printed.get_shape())

  def testPrintGradient(self):
    """Gradients flow through Print unchanged."""
    with self.test_session():
      inp = constant_op.constant(2.0, shape=[100, 32], name="in")
      w = constant_op.constant(4.0, shape=[10, 100], name="w")
      wx = math_ops.matmul(w, inp, name="wx")
      wx_print = logging_ops.Print(wx, [w, w, w])
      wx_grad = gradients_impl.gradients(wx, w)[0]
      wx_print_grad = gradients_impl.gradients(wx_print, w)[0]
      wxg = wx_grad.eval()
      wxpg = wx_print_grad.eval()
      self.assertAllEqual(wxg, wxpg)
# Standard TensorFlow test-runner entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
zachjanicki/osf.io | website/notifications/events/utils.py | 66 | 5782 | from itertools import product
from website.notifications.emails import compile_subscriptions
from website.notifications import utils, constants
def get_file_subs_from_folder(addon, user, kind, path, name):
    """Collect the paths of every file under the specified folder.

    :param addon: storage addon that can produce a file tree
    :param user: user on whose behalf the tree is fetched
    :param kind, path, name: attributes of the folder node
    :return: flat list of file paths (via list_of_files)
    """
    folder_node = {'kind': kind, 'path': path, 'name': name}
    tree = addon._get_file_tree(filenode=folder_node, user=user, version='latest-published')
    return list_of_files(tree)
def list_of_files(file_object):
    """Recursively flatten a file tree into a list of file paths.

    :param file_object: dict with a 'kind' key; files carry 'path',
        folders carry 'children'
    :return: list of path strings, depth-first order
    """
    if file_object['kind'] == 'file':
        return [file_object['path']]
    collected = []
    for child in file_object['children']:
        collected += list_of_files(child)
    return collected
def compile_user_lists(files, user, source_node, node):
    """Take multiple file ids and compiles them.

    :param files: List of WaterButler paths
    :param user: User who initiated action/event
    :param source_node: Node instance from
    :param node: Node instance to
    :return: move, warn, and remove dicts (notification type -> user ids)
    """
    # initialise subscription dictionaries
    move = {key: [] for key in constants.NOTIFICATION_TYPES}
    warn = {key: [] for key in constants.NOTIFICATION_TYPES}
    remove = {key: [] for key in constants.NOTIFICATION_TYPES}
    # get the node subscription
    if len(files) == 0:
        # No file paths: fall back to the node-level 'file_updated' event.
        move, warn, remove = categorize_users(
            user, 'file_updated', source_node, 'file_updated', node
        )
    # iterate through file subscriptions
    for file_path in files:
        path = file_path.strip('/')
        t_move, t_warn, t_remove = categorize_users(
            user, path + '_file_updated', source_node,
            path + '_file_updated', node
        )
        # Add file subs to overall list of subscriptions (set-union merge,
        # so each user appears at most once per notification type).
        for notification in constants.NOTIFICATION_TYPES:
            move[notification] = list(set(move[notification]).union(set(t_move[notification])))
            warn[notification] = list(set(warn[notification]).union(set(t_warn[notification])))
            remove[notification] = list(set(remove[notification]).union(set(t_remove[notification])))
    return move, warn, remove
def categorize_users(user, source_event, source_node, event, node):
    """Categorize users from a file subscription into three categories.

    Puts users in one of three bins:
     - Moved: User has permissions on both nodes, subscribed to both
     - Warned: User has permissions on both, not subscribed to destination
     - Removed: Does not have permission on destination node

    :param user: User instance who started the event
    :param source_event: <guid>_event_name
    :param source_node: node from where the event happened
    :param event: new guid event name
    :param node: node where event ends up
    :return: Moved, to be warned, and removed users.
    """
    remove = utils.users_to_remove(source_event, source_node, node)
    source_node_subs = compile_subscriptions(source_node, utils.find_subscription_type(source_event))
    new_subs = compile_subscriptions(node, utils.find_subscription_type(source_event), event)

    # Moves users into the warn bucket or the move bucket
    move = subscriptions_users_union(source_node_subs, new_subs)
    warn = subscriptions_users_difference(source_node_subs, new_subs)

    # Removes users without permissions
    warn, remove = subscriptions_node_permissions(node, warn, remove)

    # Remove duplicates
    warn = subscriptions_users_remove_duplicates(warn, new_subs, remove_same=False)
    move = subscriptions_users_remove_duplicates(move, new_subs, remove_same=False)

    # Remove duplicates between move and warn; and move and remove
    move = subscriptions_users_remove_duplicates(move, warn, remove_same=True)
    move = subscriptions_users_remove_duplicates(move, remove, remove_same=True)

    for notifications in constants.NOTIFICATION_TYPES:
        # Remove the user who started this whole thing.
        user_id = user._id
        if user_id in warn[notifications]:
            warn[notifications].remove(user_id)
        if user_id in move[notifications]:
            move[notifications].remove(user_id)
        if user_id in remove[notifications]:
            remove[notifications].remove(user_id)

    return move, warn, remove
def subscriptions_node_permissions(node, warn_subscription, remove_subscription):
    """Move users lacking permission on `node` out of the warn lists and
    into the (de-duplicated) remove lists.

    :return: the updated (warn_subscription, remove_subscription) pair
    """
    for notification_type in constants.NOTIFICATION_TYPES:
        with_perm, without_perm = utils.separate_users(
            node, warn_subscription[notification_type])
        warn_subscription[notification_type] = with_perm
        remove_subscription[notification_type].extend(without_perm)
        remove_subscription[notification_type] = list(
            set(remove_subscription[notification_type]))
    return warn_subscription, remove_subscription
def subscriptions_users_union(emails_1, emails_2):
    """Per notification type, return the union of the two user-id lists.

    :param emails_1, emails_2: dicts mapping notification type -> user ids
    :return: new dict with the same keys and de-duplicated merged lists
    """
    return {
        # Iterate the dict directly instead of .keys(); set `|` is the
        # idiomatic union operator.
        notification: list(set(emails_1[notification]) | set(emails_2[notification]))
        for notification in constants.NOTIFICATION_TYPES
    }
def subscriptions_users_difference(emails_1, emails_2):
    """Per notification type, return users in emails_1 but not in emails_2.

    :param emails_1, emails_2: dicts mapping notification type -> user ids
    :return: new dict with the same keys and the set differences as lists
    """
    return {
        # Iterate the dict directly instead of .keys(); set `-` is the
        # idiomatic difference operator.
        notification: list(set(emails_1[notification]) - set(emails_2[notification]))
        for notification in constants.NOTIFICATION_TYPES
    }
def subscriptions_users_remove_duplicates(emails_1, emails_2, remove_same=False):
    """Remove from emails_1 any user already present in emails_2 under a
    *different* notification type (or also the same type when
    ``remove_same=True``).  Users in emails_2's 'none' bucket never cause
    removal.

    :return: a new dict; emails_1 itself is not mutated.
    """
    emails_list = dict(emails_1)
    product_list = product(constants.NOTIFICATION_TYPES, repeat=2)
    for notification_1, notification_2 in product_list:
        # Precedence note: this condition parses as
        #   (notification_2 == notification_1 and not remove_same)
        #       or notification_2 == 'none'
        # i.e. skip same-type pairs unless remove_same, and never remove
        # based on the 'none' bucket.
        if notification_2 == notification_1 and not remove_same or notification_2 == 'none':
            continue
        emails_list[notification_1] = list(
            set(emails_list[notification_1]).difference(set(emails_2[notification_2]))
        )
    return emails_list
| apache-2.0 |
srjoglekar246/sympy | sympy/mpmath/identification.py | 12 | 28929 | """
Implements the PSLQ algorithm for integer relation detection,
and derivative algorithms for constant recognition.
"""
from .libmp.backend import xrange
from .libmp import int_types, sqrt_fixed
# round to nearest integer (can be done more elegantly...)
def round_fixed(x, prec):
    """Round a fixed-point number (scale 2**prec) to the nearest integer,
    returning the result still in fixed-point form."""
    half = 1 << (prec - 1)
    return ((x + half) >> prec) << prec
class IdentificationMethods(object):
    """Placeholder grouping class for the number-identification methods
    defined in this module (pslq, findpoly take a context as first arg)."""
    pass
def pslq(ctx, x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False):
    r"""
    Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)``
    uses the PSLQ algorithm to find a list of integers
    `[c_0, c_1, ..., c_n]` such that

    .. math ::

        |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol}

    and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector
    exists, :func:`~mpmath.pslq` returns ``None``. The tolerance defaults to
    3/4 of the working precision.

    **Examples**

    Find rational approximations for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> pslq([-1, pi], tol=0.01)
        [22, 7]
        >>> pslq([-1, pi], tol=0.001)
        [355, 113]
        >>> mpf(22)/7; mpf(355)/113; +pi
        3.14285714285714
        3.14159292035398
        3.14159265358979

    Pi is not a rational number with denominator less than 1000::

        >>> pslq([-1, pi])
        >>>

    To within the standard precision, it can however be approximated
    by at least one rational number with denominator less than `10^{12}`::

        >>> p, q = pslq([-1, pi], maxcoeff=10**12)
        >>> print(p); print(q)
        238410049439
        75888275702
        >>> mpf(p)/q
        3.14159265358979

    The PSLQ algorithm can be applied to long vectors. For example,
    we can investigate the rational (in)dependence of integer square
    roots::

        >>> mp.dps = 30
        >>> pslq([sqrt(n) for n in range(2, 5+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 6+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 8+1)])
        [2, 0, 0, 0, 0, 0, -1]

    **Machin formulas**

    A famous formula for `\pi` is Machin's,

    .. math ::

        \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239

    There are actually infinitely many formulas of this type. Two
    others are

    .. math ::

        \frac{\pi}{4} = \operatorname{acot} 1

        \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57
            + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443

    We can easily verify the formulas using the PSLQ algorithm::

        >>> mp.dps = 30
        >>> pslq([pi/4, acot(1)])
        [1, -1]
        >>> pslq([pi/4, acot(5), acot(239)])
        [1, -4, 1]
        >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)])
        [1, -12, -32, 5, -12]

    We could try to generate a custom Machin-like formula by running
    the PSLQ algorithm with a few inverse cotangent values, for example
    acot(2), acot(3) ... acot(10). Unfortunately, there is a linear
    dependence among these values, resulting in only that dependence
    being detected, with a zero coefficient for `\pi`::

        >>> pslq([pi] + [acot(n) for n in range(2,11)])
        [0, 1, -1, 0, 0, 0, -1, 0, 0, 0]

    We get better luck by removing linearly dependent terms::

        >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)])
        [1, -8, 0, 0, 4, 0, 0, 0]

    In other words, we found the following formula::

        >>> 8*acot(2) - 4*acot(7)
        3.14159265358979323846264338328
        >>> +pi
        3.14159265358979323846264338328

    **Algorithm**

    This is a fairly direct translation to Python of the pseudocode given by
    David Bailey, "The PSLQ Integer Relation Algorithm":
    http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html

    The present implementation uses fixed-point instead of floating-point
    arithmetic, since this is significantly (about 7x) faster.
    """
    n = len(x)
    assert n >= 2

    # At too low precision, the algorithm becomes meaningless
    prec = ctx.prec
    assert prec >= 53

    if verbose and prec // max(2,n) < 5:
        print("Warning: precision for PSLQ may be too low")

    target = int(prec * 0.75)

    if tol is None:
        tol = ctx.mpf(2)**(-target)
    else:
        tol = ctx.convert(tol)

    # Extra guard bits for the internal fixed-point arithmetic.
    extra = 60
    prec += extra

    if verbose:
        print("PSLQ using prec %i and tol %s" % (prec, ctx.nstr(tol)))

    tol = ctx.to_fixed(tol, prec)
    assert tol

    # Convert to fixed-point numbers. The dummy None is added so we can
    # use 1-based indexing. (This just allows us to be consistent with
    # Bailey's indexing. The algorithm is 100 lines long, so debugging
    # a single wrong index can be painful.)
    x = [None] + [ctx.to_fixed(ctx.mpf(xk), prec) for xk in x]

    # Sanity check on magnitudes
    minx = min(abs(xx) for xx in x[1:])
    if not minx:
        raise ValueError("PSLQ requires a vector of nonzero numbers")
    if minx < tol//100:
        if verbose:
            print("STOPPING: (one number is too small)")
        return None

    # gamma parameter; sqrt(4/3) is the choice used in Bailey's pseudocode.
    g = sqrt_fixed((4<<prec)//3, prec)
    A = {}
    B = {}
    H = {}

    # Initialization
    # step 1: A and B start as identity matrices, H as zero.
    for i in xrange(1, n+1):
        for j in xrange(1, n+1):
            A[i,j] = B[i,j] = (i==j) << prec
            H[i,j] = 0

    # step 2: partial norms s[k] = sqrt(x_k^2 + ... + x_n^2).
    s = [None] + [0] * n
    for k in xrange(1, n+1):
        t = 0
        for j in xrange(k, n+1):
            t += (x[j]**2 >> prec)
        s[k] = sqrt_fixed(t, prec)

    # Normalize x and s by the full norm s[1].
    t = s[1]
    y = x[:]
    for k in xrange(1, n+1):
        y[k] = (x[k] << prec) // t
        s[k] = (s[k] << prec) // t

    # step 3: build the initial lower-trapezoidal matrix H.
    for i in xrange(1, n+1):
        for j in xrange(i+1, n):
            H[i,j] = 0
        if i <= n-1:
            if s[i]:
                H[i,i] = (s[i+1] << prec) // s[i]
            else:
                H[i,i] = 0
        for j in range(1, i):
            sjj1 = s[j]*s[j+1]
            if sjj1:
                H[i,j] = ((-y[i]*y[j])<<prec)//sjj1
            else:
                H[i,j] = 0

    # step 4: initial Hermite reduction of H, mirrored in y, A and B.
    for i in xrange(2, n+1):
        for j in xrange(i-1, 0, -1):
            #t = floor(H[i,j]/H[j,j] + 0.5)
            if H[j,j]:
                t = round_fixed((H[i,j] << prec)//H[j,j], prec)
            else:
                #t = 0
                continue
            y[j] = y[j] + (t*y[i] >> prec)
            for k in xrange(1, j+1):
                H[i,k] = H[i,k] - (t*H[j,k] >> prec)
            for k in xrange(1, n+1):
                A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                B[k,j] = B[k,j] + (t*B[k,i] >> prec)

    # Main algorithm
    for REP in range(maxsteps):

        # Step 1: choose the pivot row m maximizing gamma^i * |H[i,i]|.
        m = -1
        szmax = -1
        for i in range(1, n):
            h = H[i,i]
            sz = (g**i * abs(h)) >> (prec*(i-1))
            if sz > szmax:
                m = i
                szmax = sz

        # Step 2: swap rows/columns m and m+1 in y, H, A and B.
        y[m], y[m+1] = y[m+1], y[m]
        tmp = {}  # NOTE(review): unused; kept from the original translation
        for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i]
        for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i]
        for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m]

        # Step 3: apply a Givens-style rotation to restore H's shape.
        if m <= n - 2:
            t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec)
            # A zero element probably indicates that the precision has
            # been exhausted. XXX: this could be spurious, due to
            # using fixed-point arithmetic
            if not t0:
                break
            t1 = (H[m,m] << prec) // t0
            t2 = (H[m,m+1] << prec) // t0
            for i in xrange(m, n+1):
                t3 = H[i,m]
                t4 = H[i,m+1]
                H[i,m] = (t1*t3+t2*t4) >> prec
                H[i,m+1] = (-t2*t3+t1*t4) >> prec

        # Step 4: re-reduce H, again mirroring the updates in y, A and B.
        for i in xrange(m+1, n+1):
            for j in xrange(min(i-1, m+1), 0, -1):
                try:
                    t = round_fixed((H[i,j] << prec)//H[j,j], prec)
                # Precision probably exhausted
                except ZeroDivisionError:
                    break
                y[j] = y[j] + ((t*y[i]) >> prec)
                for k in xrange(1, j+1):
                    H[i,k] = H[i,k] - (t*H[j,k] >> prec)
                for k in xrange(1, n+1):
                    A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                    B[k,j] = B[k,j] + (t*B[k,i] >> prec)

        # Until a relation is found, the error typically decreases
        # slowly (e.g. a factor 1-10) with each step TODO: we could
        # compare err from two successive iterations. If there is a
        # large drop (several orders of magnitude), that indicates a
        # "high quality" relation was detected. Reporting this to
        # the user somehow might be useful.

        best_err = maxcoeff<<prec
        for i in xrange(1, n+1):
            err = abs(y[i])
            # Maybe we are done?
            if err < tol:
                # We are done if the coefficients are acceptable
                vec = [int(round_fixed(B[j,i], prec) >> prec) for j in range(1,n+1)]
                if max(abs(v) for v in vec) < maxcoeff:
                    if verbose:
                        print("FOUND relation at iter %i/%i, error: %s" % \
                            (REP, maxsteps, ctx.nstr(err / ctx.mpf(2)**prec, 1)))
                    return vec
            best_err = min(err, best_err)

        # Calculate a lower bound for the norm. We could do this
        # more exactly (using the Euclidean norm) but there is probably
        # no practical benefit.
        recnorm = max(abs(h) for h in H.values())
        if recnorm:
            norm = ((1 << (2*prec)) // recnorm) >> prec
            norm //= 100
        else:
            norm = ctx.inf

        if verbose:
            print("%i/%i:  Error: %8s   Norm: %s" % \
                (REP, maxsteps, ctx.nstr(best_err / ctx.mpf(2)**prec, 1), norm))

        if norm >= maxcoeff:
            break

    if verbose:
        print("CANCELLING after step %i/%i." % (REP, maxsteps))
        print("Could not find an integer relation. Norm bound: %s" % norm)

    return None
def findpoly(ctx, x, n=1, **kwargs):
    r"""
    ``findpoly(x, n)`` returns the coefficients of an integer
    polynomial `P` of degree at most `n` such that `P(x) \approx 0`.
    If no polynomial having `x` as a root can be found,
    :func:`~mpmath.findpoly` returns ``None``.

    :func:`~mpmath.findpoly` works by successively calling :func:`~mpmath.pslq` with
    the vectors `[1, x]`, `[1, x, x^2]`, `[1, x, x^2, x^3]`, ...,
    `[1, x, x^2, .., x^n]` as input. Keyword arguments given to
    :func:`~mpmath.findpoly` are forwarded verbatim to :func:`~mpmath.pslq`. In
    particular, you can specify a tolerance for `P(x)` with ``tol``
    and a maximum permitted coefficient size with ``maxcoeff``.

    For large values of `n`, it is recommended to run :func:`~mpmath.findpoly`
    at high precision; preferably 50 digits or more.

    **Examples**

    By default (degree `n = 1`), :func:`~mpmath.findpoly` simply finds a linear
    polynomial with a rational root::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> findpoly(0.7)
        [-10, 7]

    The generated coefficient list is valid input to ``polyval`` and
    ``polyroots``::

        >>> nprint(polyval(findpoly(phi, 2), phi), 1)
        -2.0e-16
        >>> for r in polyroots(findpoly(phi, 2)):
        ...     print(r)
        ...
        -0.618033988749895
        1.61803398874989

    Numbers of the form `m + n \sqrt p` for integers `(m, n, p)` are
    solutions to quadratic equations. As we find here, `1+\sqrt 2`
    is a root of the polynomial `x^2 - 2x - 1`::

        >>> findpoly(1+sqrt(2), 2)
        [1, -2, -1]
        >>> findroot(lambda x: x**2 - 2*x - 1, 1)
        2.4142135623731

    Despite only containing square roots, the following number results
    in a polynomial of degree 4::

        >>> findpoly(sqrt(2)+sqrt(3), 4)
        [1, 0, -10, 0, 1]

    In fact, `x^4 - 10x^2 + 1` is the *minimal polynomial* of
    `r = \sqrt 2 + \sqrt 3`, meaning that a rational polynomial of
    lower degree having `r` as a root does not exist. Given sufficient
    precision, :func:`~mpmath.findpoly` will usually find the correct
    minimal polynomial of a given algebraic number.

    **Non-algebraic numbers**

    If :func:`~mpmath.findpoly` fails to find a polynomial with given
    coefficient size and tolerance constraints, that means no such
    polynomial exists.

    We can verify that `\pi` is not an algebraic number of degree 3 with
    coefficients less than 1000::

        >>> mp.dps = 15
        >>> findpoly(pi, 3)
        >>>

    It is always possible to find an algebraic approximation of a number
    using one (or several) of the following methods:

        1. Increasing the permitted degree
        2. Allowing larger coefficients
        3. Reducing the tolerance

    One example of each method is shown below::

        >>> mp.dps = 15
        >>> findpoly(pi, 4)
        [95, -545, 863, -183, -298]
        >>> findpoly(pi, 3, maxcoeff=10000)
        [836, -1734, -2658, -457]
        >>> findpoly(pi, 3, tol=1e-7)
        [-4, 22, -29, -2]

    It is unknown whether Euler's constant is transcendental (or even
    irrational). We can use :func:`~mpmath.findpoly` to check that if is
    an algebraic number, its minimal polynomial must have degree
    at least 7 and a coefficient of magnitude at least 1000000::

        >>> mp.dps = 200
        >>> findpoly(euler, 6, maxcoeff=10**6, tol=1e-100, maxsteps=1000)
        >>>

    Note that the high precision and strict tolerance is necessary
    for such high-degree runs, since otherwise unwanted low-accuracy
    approximations will be detected. It may also be necessary to set
    maxsteps high to prevent a premature exit (before the coefficient
    bound has been reached). Running with ``verbose=True`` to get an
    idea what is happening can be useful.
    """
    x = ctx.mpf(x)
    assert n >= 1
    if x == 0:
        return [1, 0]
    # Build the vector of powers [1, x, x^2, ..., x^n].
    xs = [ctx.mpf(1)]
    for i in range(1,n+1):
        xs.append(x**i)
    a = ctx.pslq(xs, **kwargs)
    if a is not None:
        # pslq returns [c_0, ..., c_n]; reverse so the leading (highest
        # degree) coefficient comes first, as polyval/polyroots expect.
        # (Returns None implicitly when no relation was found.)
        return a[::-1]
def fracgcd(p, q):
    """Reduce the fraction p/q to lowest terms.

    Returns a plain int when the reduced denominator is 1, otherwise
    the tuple (numerator, denominator).
    """
    a, b = p, q
    while b:
        a, b = b, a % b
    if a != 1:
        p //= a
        q //= a
    return p if q == 1 else (p, q)
def pslqstring(r, constants):
    """Format a PSLQ relation vector as a linear combination string.

    r[0] is used as a common denominator; each remaining coefficient
    r[i+1] contributes a term (-r[i+1]/r[0]) * constants[i][1], where
    constants[i] = (value, name).  Returns '0' for an all-zero vector.
    """
    q = r[0]
    r = r[1:]
    s = []
    for i in range(len(r)):
        p = r[i]
        if p:
            z = fracgcd(-p,q)  # reduced fraction -p/q (int or (num, den))
            cs = constants[i][1]
            if cs == '1':
                cs = ''
            else:
                cs = '*' + cs
            if isinstance(z, int_types):
                if z > 0: term = str(z) + cs
                else: term = ("(%s)" % z) + cs
            else:
                term = ("(%s/%s)" % z) + cs
            s.append(term)
    s = ' + '.join(s)
    # Parenthesize compound expressions so they compose safely.
    if '+' in s or '*' in s:
        s = '(' + s + ')'
    return s or '0'
def prodstring(r, constants):
    """Format a PSLQ relation among logarithms as a product/quotient.

    ``r[0]`` is the common denominator of the exponents; each following
    entry is the (negated) integer numerator for the matching
    ``(value, name)`` pair.  Factors with negative exponents collect in
    the denominator.  Returns None when every coefficient is zero.
    """
    q = r[0]
    coeffs = r[1:]
    num = []
    den = []
    for i, coeff in enumerate(coeffs):
        if not coeff:
            continue
        z = fracgcd(-coeff, q)
        name = constants[i][1]
        if isinstance(z, int_types):
            # Integer exponent: omit the redundant '**1'.
            factor = name if abs(z) == 1 else '%s**%s' % (name, abs(z))
            (den if z < 0 else num).append(factor)
        else:
            # Fractional exponent p/q.
            factor = '%s**(%s/%s)' % (name, abs(z[0]), z[1])
            (den if z[0] < 0 else num).append(factor)
    num_s = '*'.join(num)
    den_s = '*'.join(den)
    if num_s and den_s:
        return "(%s)/(%s)" % (num_s, den_s)
    if num_s:
        return num_s
    if den_s:
        return "1/(%s)" % den_s
def quadraticstring(ctx, t, a, b, c):
    """Return a closed-form string for the root of a + b*x + c*x**2 = 0
    lying closest to the target value *t*.

    Signs are flipped when necessary so the reported denominator ``2*c``
    is positive; *ctx* supplies the square root used to compare roots.
    """
    if c < 0:
        a, b, c = -a, -b, -c
    disc = b**2 - 4*a*c
    root = ctx.sqrt(disc)
    # Choose whichever quadratic root lies nearer to t.
    if abs((-b + root)/(2*c) - t) < abs((-b - root)/(2*c) - t):
        if b:
            return '((%s+sqrt(%s))/%s)' % (-b, disc, 2*c)
        return '(sqrt(%s)/%s)' % (-4*a*c, 2*c)
    if b:
        return '((%s-sqrt(%s))/%s)' % (-b, disc, 2*c)
    return '(-sqrt(%s)/%s)' % (-4*a*c, 2*c)
# Candidate transformations y = f(x, c) applied to x before running PSLQ,
# each paired with a string template for the inverse x = f^(-1)(y, c)
# ($y and $c are substituted with the found expression and constant name).
# The third entry flags transformations that are redundant when c == 1
# (e.g. x*c and x/c coincide), so identify() can skip the duplicate.
transforms = [
  (lambda ctx,x,c: x*c, '$y/$c', 0),
  (lambda ctx,x,c: x/c, '$c*$y', 1),
  (lambda ctx,x,c: c/x, '$c/$y', 0),
  (lambda ctx,x,c: (x*c)**2, 'sqrt($y)/$c', 0),
  (lambda ctx,x,c: (x/c)**2, '$c*sqrt($y)', 1),
  (lambda ctx,x,c: (c/x)**2, '$c/sqrt($y)', 0),
  (lambda ctx,x,c: c*x**2, 'sqrt($y)/sqrt($c)', 1),
  (lambda ctx,x,c: x**2/c, 'sqrt($c)*sqrt($y)', 1),
  (lambda ctx,x,c: c/x**2, 'sqrt($c)/sqrt($y)', 1),
  (lambda ctx,x,c: ctx.sqrt(x*c), '$y**2/$c', 0),
  (lambda ctx,x,c: ctx.sqrt(x/c), '$c*$y**2', 1),
  (lambda ctx,x,c: ctx.sqrt(c/x), '$c/$y**2', 0),
  (lambda ctx,x,c: c*ctx.sqrt(x), '$y**2/$c**2', 1),
  (lambda ctx,x,c: ctx.sqrt(x)/c, '$c**2*$y**2', 1),
  (lambda ctx,x,c: c/ctx.sqrt(x), '$c**2/$y**2', 1),
  (lambda ctx,x,c: ctx.exp(x*c), 'log($y)/$c', 0),
  (lambda ctx,x,c: ctx.exp(x/c), '$c*log($y)', 1),
  (lambda ctx,x,c: ctx.exp(c/x), '$c/log($y)', 0),
  (lambda ctx,x,c: c*ctx.exp(x), 'log($y/$c)', 1),
  (lambda ctx,x,c: ctx.exp(x)/c, 'log($c*$y)', 1),
  (lambda ctx,x,c: c/ctx.exp(x), 'log($c/$y)', 0),
  (lambda ctx,x,c: ctx.ln(x*c), 'exp($y)/$c', 0),
  (lambda ctx,x,c: ctx.ln(x/c), '$c*exp($y)', 1),
  (lambda ctx,x,c: ctx.ln(c/x), '$c/exp($y)', 0),
  (lambda ctx,x,c: c*ctx.ln(x), 'exp($y/$c)', 1),
  (lambda ctx,x,c: ctx.ln(x)/c, 'exp($c*$y)', 1),
  (lambda ctx,x,c: c/ctx.ln(x), 'exp($c/$y)', 0),
]
def identify(ctx, x, constants=None, tol=None, maxcoeff=1000, full=False,
    verbose=False):
    """
    Given a real number `x`, ``identify(x)`` attempts to find an exact
    formula for `x`. This formula is returned as a string. If no match
    is found, ``None`` is returned. With ``full=True``, a list of
    matching formulas is returned.
    As a simple example, :func:`~mpmath.identify` will find an algebraic
    formula for the golden ratio::
        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> identify(phi)
        '((1+sqrt(5))/2)'
    :func:`~mpmath.identify` can identify simple algebraic numbers and simple
    combinations of given base constants, as well as certain basic
    transformations thereof. More specifically, :func:`~mpmath.identify`
    looks for the following:
    1. Fractions
    2. Quadratic algebraic numbers
    3. Rational linear combinations of the base constants
    4. Any of the above after first transforming `x` into `f(x)` where
       `f(x)` is `1/x`, `\sqrt x`, `x^2`, `\log x` or `\exp x`, either
       directly or with `x` or `f(x)` multiplied or divided by one of
       the base constants
    5. Products of fractional powers of the base constants and
       small integers
    Base constants can be given as a list of strings representing mpmath
    expressions (:func:`~mpmath.identify` will ``eval`` the strings to numerical
    values and use the original strings for the output), or as a dict of
    formula:value pairs.
    In order not to produce spurious results, :func:`~mpmath.identify` should
    be used with high precision; preferably 50 digits or more.
    **Examples**
    Simple identifications can be performed safely at standard
    precision. Here the default recognition of rational, algebraic,
    and exp/log of algebraic numbers is demonstrated::
        >>> mp.dps = 15
        >>> identify(0.22222222222222222)
        '(2/9)'
        >>> identify(1.9662210973805663)
        'sqrt(((24+sqrt(48))/8))'
        >>> identify(4.1132503787829275)
        'exp((sqrt(8)/2))'
        >>> identify(0.881373587019543)
        'log(((2+sqrt(8))/2))'
    By default, :func:`~mpmath.identify` does not recognize `\pi`. At standard
    precision it finds a not too useful approximation. At slightly
    increased precision, this approximation is no longer accurate
    enough and :func:`~mpmath.identify` more correctly returns ``None``::
        >>> identify(pi)
        '(2**(176/117)*3**(20/117)*5**(35/39))/(7**(92/117))'
        >>> mp.dps = 30
        >>> identify(pi)
        >>>
    Numbers such as `\pi`, and simple combinations of user-defined
    constants, can be identified if they are provided explicitly::
        >>> identify(3*pi-2*e, ['pi', 'e'])
        '(3*pi + (-2)*e)'
    Here is an example using a dict of constants. Note that the
    constants need not be "atomic"; :func:`~mpmath.identify` can just
    as well express the given number in terms of expressions
    given by formulas::
        >>> identify(pi+e, {'a':pi+2, 'b':2*e})
        '((-2) + 1*a + (1/2)*b)'
    Next, we attempt some identifications with a set of base constants.
    It is necessary to increase the precision a bit.
        >>> mp.dps = 50
        >>> base = ['sqrt(2)','pi','log(2)']
        >>> identify(0.25, base)
        '(1/4)'
        >>> identify(3*pi + 2*sqrt(2) + 5*log(2)/7, base)
        '(2*sqrt(2) + 3*pi + (5/7)*log(2))'
        >>> identify(exp(pi+2), base)
        'exp((2 + 1*pi))'
        >>> identify(1/(3+sqrt(2)), base)
        '((3/7) + (-1/7)*sqrt(2))'
        >>> identify(sqrt(2)/(3*pi+4), base)
        'sqrt(2)/(4 + 3*pi)'
        >>> identify(5**(mpf(1)/3)*pi*log(2)**2, base)
        '5**(1/3)*pi*log(2)**2'
    An example of an erroneous solution being found when too low
    precision is used::
        >>> mp.dps = 15
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '((11/25) + (-158/75)*pi + (76/75)*e + (44/15)*sqrt(2))'
        >>> mp.dps = 50
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '1/(3*pi + (-4)*e + 2*sqrt(2))'
    **Finding approximate solutions**
    The tolerance ``tol`` defaults to 3/4 of the working precision.
    Lowering the tolerance is useful for finding approximate matches.
    We can for example try to generate approximations for pi::
        >>> mp.dps = 15
        >>> identify(pi, tol=1e-2)
        '(22/7)'
        >>> identify(pi, tol=1e-3)
        '(355/113)'
        >>> identify(pi, tol=1e-10)
        '(5**(339/269))/(2**(64/269)*3**(13/269)*7**(92/269))'
    With ``full=True``, and by supplying a few base constants,
    ``identify`` can generate almost endless lists of approximations
    for any number (the output below has been truncated to show only
    the first few)::
        >>> for p in identify(pi, ['e', 'catalan'], tol=1e-5, full=True):
        ...     print(p)
        ...  # doctest: +ELLIPSIS
        e/log((6 + (-4/3)*e))
        (3**3*5*e*catalan**2)/(2*7**2)
        sqrt(((-13) + 1*e + 22*catalan))
        log(((-6) + 24*e + 4*catalan)/e)
        exp(catalan*((-1/5) + (8/15)*e))
        catalan*(6 + (-6)*e + 15*catalan)
        sqrt((5 + 26*e + (-3)*catalan))/e
        e*sqrt(((-27) + 2*e + 25*catalan))
        log(((-1) + (-11)*e + 59*catalan))
        ((3/20) + (21/20)*e + (3/20)*catalan)
        ...
    The numerical values are roughly as close to `\pi` as permitted by the
    specified tolerance:
        >>> e/log(6-4*e/3)
        3.14157719846001
        >>> 135*e*catalan**2/98
        3.14166950419369
        >>> sqrt(e-13+22*catalan)
        3.14158000062992
        >>> log(24*e-6+4*catalan)-1
        3.14158791577159
    **Symbolic processing**
    The output formula can be evaluated as a Python expression.
    Note however that if fractions (like '2/3') are present in
    the formula, Python's :func:`eval` may erroneously perform
    integer division. Note also that the output is not necessarily
    in the algebraically simplest form::
        >>> identify(sqrt(2))
        '(sqrt(8)/2)'
    As a solution to both problems, consider using SymPy's
    :func:`~mpmath.sympify` to convert the formula into a symbolic expression.
    SymPy can be used to pretty-print or further simplify the formula
    symbolically::
        >>> from sympy import sympify
        >>> sympify(identify(sqrt(2)))
        sqrt(2)
    Sometimes :func:`~mpmath.identify` can simplify an expression further than
    a symbolic algorithm::
        >>> from sympy import simplify
        >>> x = sympify('-1/(-3/2+(1/2)*sqrt(5))*sqrt(3/2-1/2*sqrt(5))')
        >>> x
        1/sqrt(3/2 - sqrt(5)/2)
        >>> x = simplify(x)
        >>> x
        2/sqrt(6 - 2*sqrt(5))
        >>> mp.dps = 30
        >>> x = sympify(identify(x.evalf(30)))
        >>> x
        1/2 + sqrt(5)/2
    (In fact, this functionality is available directly in SymPy as the
    function :func:`~mpmath.nsimplify`, which is essentially a wrapper for
    :func:`~mpmath.identify`.)
    **Miscellaneous issues and limitations**
    The input `x` must be a real number. All base constants must be
    positive real numbers and must not be rationals or rational linear
    combinations of each other.
    The worst-case computation time grows quickly with the number of
    base constants. Already with 3 or 4 base constants,
    :func:`~mpmath.identify` may require several seconds to finish. To search
    for relations among a large number of constants, you should
    consider using :func:`~mpmath.pslq` directly.
    The extended transformations are applied to x, not the constants
    separately. As a result, ``identify`` will for example be able to
    recognize ``exp(2*pi+3)`` with ``pi`` given as a base constant, but
    not ``2*exp(pi)+3``. It will be able to recognize the latter if
    ``exp(pi)`` is given explicitly as a base constant.
    """
    solutions = []
    def addsolution(s):
        if verbose: print("Found: ", s)
        solutions.append(s)
    x = ctx.mpf(x)
    # Further along, x will be assumed positive
    if x == 0:
        if full: return ['0']
        else: return '0'
    if x < 0:
        # Identify |x| and negate the resulting formula(s).
        sol = ctx.identify(-x, constants, tol, maxcoeff, full, verbose)
        if sol is None:
            return sol
        if full:
            return ["-(%s)"%s for s in sol]
        else:
            return "-(%s)" % sol
    if tol:
        tol = ctx.mpf(tol)
    else:
        # Default tolerance: roughly 3/4 of the working precision.
        tol = ctx.eps**0.7
    M = maxcoeff
    # Normalize the base constants into a list of (value, name) pairs.
    # ``constants`` defaults to None rather than a shared mutable list.
    if constants:
        if isinstance(constants, dict):
            constants = [(ctx.mpf(v), name) for (name, v) in constants.items()]
        else:
            # NOTE: string constants are eval()'d against the context
            # namespace; only pass trusted expressions here.
            namespace = dict((name, getattr(ctx,name)) for name in dir(ctx))
            constants = [(eval(p, namespace), p) for p in constants]
    else:
        constants = []
    # We always want to find at least rational terms, so ensure the
    # constant 1 is present.  (Entries are (value, name) pairs; the
    # check compares against the numeric values, so a user-supplied 1
    # is not duplicated into a degenerate PSLQ column.)
    if 1 not in [value for (value, name) in constants]:
        constants = [(ctx.mpf(1), '1')] + constants
    # PSLQ with simple algebraic and functional transformations
    for ft, ftn, red in transforms:
        for c, cn in constants:
            if red and cn == '1':
                continue
            t = ft(ctx,x,c)
            # Prevent exponential transforms from wreaking havoc
            if abs(t) > M**2 or abs(t) < tol:
                continue
            # Linear combination of base constants
            r = ctx.pslq([t] + [a[0] for a in constants], tol, M)
            s = None
            if r is not None and max(abs(uw) for uw in r) <= M and r[0]:
                s = pslqstring(r, constants)
            # Quadratic algebraic numbers
            else:
                q = ctx.pslq([ctx.one, t, t**2], tol, M)
                if q is not None and len(q) == 3 and q[2]:
                    aa, bb, cc = q
                    if max(abs(aa),abs(bb),abs(cc)) <= M:
                        s = quadraticstring(ctx,t,aa,bb,cc)
            if s:
                # Substitute the inner formula into the inverse template;
                # dividing by the constant '1' is dropped entirely.
                if cn == '1' and ('/$c' in ftn):
                    s = ftn.replace('$y', s).replace('/$c', '')
                else:
                    s = ftn.replace('$y', s).replace('$c', cn)
                addsolution(s)
                if not full: return solutions[0]
            if verbose:
                print(".")
    # Check for a direct multiplicative formula
    if x != 1:
        # Allow fractional powers of fractions
        ilogs = [2,3,5,7]
        # Watch out for existing fractional powers of fractions
        logs = []
        for a, s in constants:
            if not sum(bool(ctx.findpoly(ctx.ln(a)/ctx.ln(i),1)) for i in ilogs):
                logs.append((ctx.ln(a), s))
        logs = [(ctx.ln(i),str(i)) for i in ilogs] + logs
        # A relation among logarithms corresponds to a product of powers.
        r = ctx.pslq([ctx.ln(x)] + [a[0] for a in logs], tol, M)
        if r is not None and max(abs(uw) for uw in r) <= M and r[0]:
            addsolution(prodstring(r, logs))
            if not full: return solutions[0]
    if full:
        return sorted(solutions, key=len)
    else:
        return None
# Attach the module-level functions as methods of the mixin class so
# they are reachable as ctx.pslq / ctx.findpoly / ctx.identify on any
# mpmath context instance.
IdentificationMethods.pslq = pslq
IdentificationMethods.findpoly = findpoly
IdentificationMethods.identify = identify
if __name__ == '__main__':
    # Run the docstring examples as a self-test when executed directly.
    import doctest
    doctest.testmod()
| bsd-3-clause |
lgscofield/odoo | addons/account_analytic_plans/wizard/__init__.py | 445 | 1117 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import analytic_plan_create_model
import account_crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jcoady9/python-for-android | python3-alpha/python3-src/Lib/encodings/cp737.py | 272 | 34681 | """ Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: each call translates the whole input in one
    # charmap pass using the module-level tables below.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so no state is carried between
    # calls; ``final`` is accepted for interface compatibility only.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charmap decoding needs no buffering between calls.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec and stream plumbing from
    # codecs.StreamWriter; no extra behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec and stream plumbing from
    # codecs.StreamReader; no extra behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register this codec as 'cp737'."""
    return codecs.CodecInfo(
        name='cp737',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
# Bytes 0x00-0x7F decode as themselves (ASCII); the update below remaps
# the high half 0x80-0xFF to Greek letters, box-drawing characters and
# symbols per the CP737 (MS-DOS Greek) vendor mapping.
decoding_map.update({
    0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
    0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
    0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
    0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
    0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
    0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
    0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
    0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
    0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
    0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
    0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
    0x008b: 0x039c, # GREEK CAPITAL LETTER MU
    0x008c: 0x039d, # GREEK CAPITAL LETTER NU
    0x008d: 0x039e, # GREEK CAPITAL LETTER XI
    0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
    0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
    0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
    0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
    0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
    0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
    0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
    0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
    0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
    0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
    0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
    0x0099: 0x03b2, # GREEK SMALL LETTER BETA
    0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
    0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
    0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
    0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
    0x009e: 0x03b7, # GREEK SMALL LETTER ETA
    0x009f: 0x03b8, # GREEK SMALL LETTER THETA
    0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
    0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
    0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
    0x00a3: 0x03bc, # GREEK SMALL LETTER MU
    0x00a4: 0x03bd, # GREEK SMALL LETTER NU
    0x00a5: 0x03be, # GREEK SMALL LETTER XI
    0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
    0x00a7: 0x03c0, # GREEK SMALL LETTER PI
    0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
    0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
    0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
    0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
    0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
    0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
    0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
    0x00af: 0x03c8, # GREEK SMALL LETTER PSI
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
    0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
    0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
    0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
    0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
    0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
    0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
    0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
    0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
    0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
    0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
    0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
    0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
    0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
    0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
    0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
    0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
    0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
    0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x2248, # ALMOST EQUAL TO
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# Tuple indexed by byte value (0-255); each entry is the decoded Unicode
# character.  Redundant with decoding_map above but used by the faster
# table-based charmap_decode path.
decoding_table = (
    '\x00' # 0x0000 -> NULL
    '\x01' # 0x0001 -> START OF HEADING
    '\x02' # 0x0002 -> START OF TEXT
    '\x03' # 0x0003 -> END OF TEXT
    '\x04' # 0x0004 -> END OF TRANSMISSION
    '\x05' # 0x0005 -> ENQUIRY
    '\x06' # 0x0006 -> ACKNOWLEDGE
    '\x07' # 0x0007 -> BELL
    '\x08' # 0x0008 -> BACKSPACE
    '\t' # 0x0009 -> HORIZONTAL TABULATION
    '\n' # 0x000a -> LINE FEED
    '\x0b' # 0x000b -> VERTICAL TABULATION
    '\x0c' # 0x000c -> FORM FEED
    '\r' # 0x000d -> CARRIAGE RETURN
    '\x0e' # 0x000e -> SHIFT OUT
    '\x0f' # 0x000f -> SHIFT IN
    '\x10' # 0x0010 -> DATA LINK ESCAPE
    '\x11' # 0x0011 -> DEVICE CONTROL ONE
    '\x12' # 0x0012 -> DEVICE CONTROL TWO
    '\x13' # 0x0013 -> DEVICE CONTROL THREE
    '\x14' # 0x0014 -> DEVICE CONTROL FOUR
    '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
    '\x16' # 0x0016 -> SYNCHRONOUS IDLE
    '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
    '\x18' # 0x0018 -> CANCEL
    '\x19' # 0x0019 -> END OF MEDIUM
    '\x1a' # 0x001a -> SUBSTITUTE
    '\x1b' # 0x001b -> ESCAPE
    '\x1c' # 0x001c -> FILE SEPARATOR
    '\x1d' # 0x001d -> GROUP SEPARATOR
    '\x1e' # 0x001e -> RECORD SEPARATOR
    '\x1f' # 0x001f -> UNIT SEPARATOR
    ' ' # 0x0020 -> SPACE
    '!' # 0x0021 -> EXCLAMATION MARK
    '"' # 0x0022 -> QUOTATION MARK
    '#' # 0x0023 -> NUMBER SIGN
    '$' # 0x0024 -> DOLLAR SIGN
    '%' # 0x0025 -> PERCENT SIGN
    '&' # 0x0026 -> AMPERSAND
    "'" # 0x0027 -> APOSTROPHE
    '(' # 0x0028 -> LEFT PARENTHESIS
    ')' # 0x0029 -> RIGHT PARENTHESIS
    '*' # 0x002a -> ASTERISK
    '+' # 0x002b -> PLUS SIGN
    ',' # 0x002c -> COMMA
    '-' # 0x002d -> HYPHEN-MINUS
    '.' # 0x002e -> FULL STOP
    '/' # 0x002f -> SOLIDUS
    '0' # 0x0030 -> DIGIT ZERO
    '1' # 0x0031 -> DIGIT ONE
    '2' # 0x0032 -> DIGIT TWO
    '3' # 0x0033 -> DIGIT THREE
    '4' # 0x0034 -> DIGIT FOUR
    '5' # 0x0035 -> DIGIT FIVE
    '6' # 0x0036 -> DIGIT SIX
    '7' # 0x0037 -> DIGIT SEVEN
    '8' # 0x0038 -> DIGIT EIGHT
    '9' # 0x0039 -> DIGIT NINE
    ':' # 0x003a -> COLON
    ';' # 0x003b -> SEMICOLON
    '<' # 0x003c -> LESS-THAN SIGN
    '=' # 0x003d -> EQUALS SIGN
    '>' # 0x003e -> GREATER-THAN SIGN
    '?' # 0x003f -> QUESTION MARK
    '@' # 0x0040 -> COMMERCIAL AT
    'A' # 0x0041 -> LATIN CAPITAL LETTER A
    'B' # 0x0042 -> LATIN CAPITAL LETTER B
    'C' # 0x0043 -> LATIN CAPITAL LETTER C
    'D' # 0x0044 -> LATIN CAPITAL LETTER D
    'E' # 0x0045 -> LATIN CAPITAL LETTER E
    'F' # 0x0046 -> LATIN CAPITAL LETTER F
    'G' # 0x0047 -> LATIN CAPITAL LETTER G
    'H' # 0x0048 -> LATIN CAPITAL LETTER H
    'I' # 0x0049 -> LATIN CAPITAL LETTER I
    'J' # 0x004a -> LATIN CAPITAL LETTER J
    'K' # 0x004b -> LATIN CAPITAL LETTER K
    'L' # 0x004c -> LATIN CAPITAL LETTER L
    'M' # 0x004d -> LATIN CAPITAL LETTER M
    'N' # 0x004e -> LATIN CAPITAL LETTER N
    'O' # 0x004f -> LATIN CAPITAL LETTER O
    'P' # 0x0050 -> LATIN CAPITAL LETTER P
    'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
    'R' # 0x0052 -> LATIN CAPITAL LETTER R
    'S' # 0x0053 -> LATIN CAPITAL LETTER S
    'T' # 0x0054 -> LATIN CAPITAL LETTER T
    'U' # 0x0055 -> LATIN CAPITAL LETTER U
    'V' # 0x0056 -> LATIN CAPITAL LETTER V
    'W' # 0x0057 -> LATIN CAPITAL LETTER W
    'X' # 0x0058 -> LATIN CAPITAL LETTER X
    'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
    'Z' # 0x005a -> LATIN CAPITAL LETTER Z
    '[' # 0x005b -> LEFT SQUARE BRACKET
    '\\' # 0x005c -> REVERSE SOLIDUS
    ']' # 0x005d -> RIGHT SQUARE BRACKET
    '^' # 0x005e -> CIRCUMFLEX ACCENT
    '_' # 0x005f -> LOW LINE
    '`' # 0x0060 -> GRAVE ACCENT
    'a' # 0x0061 -> LATIN SMALL LETTER A
    'b' # 0x0062 -> LATIN SMALL LETTER B
    'c' # 0x0063 -> LATIN SMALL LETTER C
    'd' # 0x0064 -> LATIN SMALL LETTER D
    'e' # 0x0065 -> LATIN SMALL LETTER E
    'f' # 0x0066 -> LATIN SMALL LETTER F
    'g' # 0x0067 -> LATIN SMALL LETTER G
    'h' # 0x0068 -> LATIN SMALL LETTER H
    'i' # 0x0069 -> LATIN SMALL LETTER I
    'j' # 0x006a -> LATIN SMALL LETTER J
    'k' # 0x006b -> LATIN SMALL LETTER K
    'l' # 0x006c -> LATIN SMALL LETTER L
    'm' # 0x006d -> LATIN SMALL LETTER M
    'n' # 0x006e -> LATIN SMALL LETTER N
    'o' # 0x006f -> LATIN SMALL LETTER O
    'p' # 0x0070 -> LATIN SMALL LETTER P
    'q' # 0x0071 -> LATIN SMALL LETTER Q
    'r' # 0x0072 -> LATIN SMALL LETTER R
    's' # 0x0073 -> LATIN SMALL LETTER S
    't' # 0x0074 -> LATIN SMALL LETTER T
    'u' # 0x0075 -> LATIN SMALL LETTER U
    'v' # 0x0076 -> LATIN SMALL LETTER V
    'w' # 0x0077 -> LATIN SMALL LETTER W
    'x' # 0x0078 -> LATIN SMALL LETTER X
    'y' # 0x0079 -> LATIN SMALL LETTER Y
    'z' # 0x007a -> LATIN SMALL LETTER Z
    '{' # 0x007b -> LEFT CURLY BRACKET
    '|' # 0x007c -> VERTICAL LINE
    '}' # 0x007d -> RIGHT CURLY BRACKET
    '~' # 0x007e -> TILDE
    '\x7f' # 0x007f -> DELETE
    '\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
    '\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
    '\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
    '\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
    '\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
    '\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
    '\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
    '\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
    '\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
    '\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
    '\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
    '\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
    '\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
    '\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
    '\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
    '\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
    '\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
    '\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
    '\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
    '\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
    '\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
    '\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
    '\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
    '\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
    '\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
    '\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
    '\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
    '\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
    '\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
    '\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
    '\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
    '\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
    '\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
    '\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
    '\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
    '\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
    '\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
    '\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
    '\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
    '\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
    '\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
    '\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
    '\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
    '\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
    '\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
    '\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
    '\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
    '\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
    '\u2591' # 0x00b0 -> LIGHT SHADE
    '\u2592' # 0x00b1 -> MEDIUM SHADE
    '\u2593' # 0x00b2 -> DARK SHADE
    '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    '\u2588' # 0x00db -> FULL BLOCK
    '\u2584' # 0x00dc -> LOWER HALF BLOCK
    '\u258c' # 0x00dd -> LEFT HALF BLOCK
    '\u2590' # 0x00de -> RIGHT HALF BLOCK
    '\u2580' # 0x00df -> UPPER HALF BLOCK
    '\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
    '\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
    '\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
    '\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
    '\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
    '\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
    '\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
    '\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
    '\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
    '\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
    '\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
    '\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
    '\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
    '\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
    '\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
    '\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
    '\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
    '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
    '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
    '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
    '\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
    '\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
    '\xf7' # 0x00f6 -> DIVISION SIGN
    '\u2248' # 0x00f7 -> ALMOST EQUAL TO
    '\xb0' # 0x00f8 -> DEGREE SIGN
    '\u2219' # 0x00f9 -> BULLET OPERATOR
    '\xb7' # 0x00fa -> MIDDLE DOT
    '\u221a' # 0x00fb -> SQUARE ROOT
    '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
    '\xb2' # 0x00fd -> SUPERSCRIPT TWO
    '\u25a0' # 0x00fe -> BLACK SQUARE
    '\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b7: 0x00fa, # MIDDLE DOT
0x00f7: 0x00f6, # DIVISION SIGN
0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x0081, # GREEK CAPITAL LETTER BETA
0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA
0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA
0x0397: 0x0086, # GREEK CAPITAL LETTER ETA
0x0398: 0x0087, # GREEK CAPITAL LETTER THETA
0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA
0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x008b, # GREEK CAPITAL LETTER MU
0x039d: 0x008c, # GREEK CAPITAL LETTER NU
0x039e: 0x008d, # GREEK CAPITAL LETTER XI
0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x008f, # GREEK CAPITAL LETTER PI
0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO
0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU
0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI
0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI
0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI
0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA
0x03b2: 0x0099, # GREEK SMALL LETTER BETA
0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA
0x03b4: 0x009b, # GREEK SMALL LETTER DELTA
0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON
0x03b6: 0x009d, # GREEK SMALL LETTER ZETA
0x03b7: 0x009e, # GREEK SMALL LETTER ETA
0x03b8: 0x009f, # GREEK SMALL LETTER THETA
0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA
0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00a3, # GREEK SMALL LETTER MU
0x03bd: 0x00a4, # GREEK SMALL LETTER NU
0x03be: 0x00a5, # GREEK SMALL LETTER XI
0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00a7, # GREEK SMALL LETTER PI
0x03c1: 0x00a8, # GREEK SMALL LETTER RHO
0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ab, # GREEK SMALL LETTER TAU
0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00ad, # GREEK SMALL LETTER PHI
0x03c7: 0x00ae, # GREEK SMALL LETTER CHI
0x03c8: 0x00af, # GREEK SMALL LETTER PSI
0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
aeliot/openthread | tests/scripts/thread-cert/network_layer.py | 10 | 8233 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import struct
from binascii import hexlify
from enum import IntEnum
import common
class TlvType(IntEnum):
    """TLV type codes used in Thread network-layer messages (address
    resolution and network data).  Note: value 5 is unassigned here."""
    TARGET_EID = 0
    MAC_EXTENDED_ADDRESS = 1
    RLOC16 = 2
    ML_EID = 3
    STATUS = 4
    TIME_SINCE_LAST_TRANSACTION = 6
    ROUTER_MASK = 7
    ND_OPTION = 8
    ND_DATA = 9
    THREAD_NETWORK_DATA = 10
    MLE_ROUTING = 11
class StatusValues(IntEnum):
    """Status codes carried in the Status TLV of address-resolution and
    child-management replies."""
    SUCCESS = 0
    NO_ADDRESS_AVAILABLE = 1
    TOO_FEW_ROUTERS = 2
    HAVE_CHILD_ID_REQUEST = 3
    PARENT_PARTITION_CHANGE = 4
class TargetEid(object):
    """Value object for a Target EID TLV (a 16-byte EID)."""

    def __init__(self, eid):
        self._eid = eid

    @property
    def eid(self):
        """The raw EID bytes."""
        return self._eid

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return other.eid == self._eid

    def __repr__(self):
        return "TargetEid(eid=%s)" % hexlify(self._eid)


class TargetEidFactory(object):
    """Builds a TargetEid from a 16-byte wire payload."""

    def parse(self, data, message_info):
        # A Target EID is always a full 16-byte address.
        return TargetEid(bytearray(data.read(16)))
class MacExtendedAddress(object):
    """Value object for an 8-byte IEEE 802.15.4 extended MAC address."""

    def __init__(self, mac_address):
        self._mac_address = mac_address

    @property
    def mac_address(self):
        """The raw 8-byte address."""
        return self._mac_address

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return other.mac_address == self._mac_address

    def __repr__(self):
        return "MacExtendedAddress(mac_address=%s)" % hexlify(self._mac_address)


class MacExtendedAddressFactory(object):
    """Builds a MacExtendedAddress from an 8-byte wire payload."""

    def parse(self, data, message_info):
        return MacExtendedAddress(bytearray(data.read(8)))
class Rloc16(object):
    """Value object for a 16-bit Router Locator (RLOC16)."""

    def __init__(self, rloc16):
        self._rloc16 = rloc16

    @property
    def rloc16(self):
        return self._rloc16

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return other.rloc16 == self._rloc16

    def __repr__(self):
        return "Rloc16(rloc16=%s)" % hex(self._rloc16)


class Rloc16Factory(object):
    """Builds an Rloc16 from a big-endian 16-bit wire payload."""

    def parse(self, data, message_info):
        (value,) = struct.unpack(">H", data.read(2))
        return Rloc16(value)
class MlEid(object):
    """Value object for a Mesh-Local EID's 8-byte interface identifier."""

    def __init__(self, ml_eid):
        self._ml_eid = ml_eid

    @property
    def ml_eid(self):
        return self._ml_eid

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return other.ml_eid == self._ml_eid

    def __repr__(self):
        return "MlEid(ml_eid=%s)" % hexlify(self._ml_eid)


class MlEidFactory(object):
    """Builds an MlEid from an 8-byte wire payload."""

    def parse(self, data, message_info):
        return MlEid(bytearray(data.read(8)))
class Status(object):
    """Value object for a Status TLV (holds one StatusValues code)."""

    def __init__(self, status):
        self._status = status

    @property
    def status(self):
        return self._status

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return other.status == self._status

    def __repr__(self):
        return "Status(status=%s)" % (self._status,)


class StatusFactory(object):
    """Builds a Status from a one-byte status code."""

    def parse(self, data, message_info):
        code = ord(data.read(1))
        return Status(StatusValues(code))
class TimeSinceLastTransaction(object):
    """Value object for the Time Since Last Transaction TLV (seconds)."""

    def __init__(self, seconds):
        self._seconds = seconds

    @property
    def seconds(self):
        return self._seconds

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return other.seconds == self._seconds

    def __repr__(self):
        return "TimeSinceLastTransaction(seconds=%s)" % (self._seconds,)


class TimeSinceLastTransactionFactory(object):
    """Builds the value object from a big-endian 32-bit seconds counter."""

    def parse(self, data, message_info):
        (seconds,) = struct.unpack(">L", data.read(4))
        return TimeSinceLastTransaction(seconds)
class RouterMask(object):
    """Value object for the Router Mask TLV: a router ID sequence number
    plus a 64-bit bitmask of assigned router IDs."""

    def __init__(self, id_sequence, router_id_mask):
        self._id_sequence = id_sequence
        self._router_id_mask = router_id_mask

    @property
    def id_sequence(self):
        return self._id_sequence

    @property
    def router_id_mask(self):
        return self._router_id_mask

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        if self.id_sequence != other.id_sequence:
            return False
        return self.router_id_mask == other.router_id_mask

    def __repr__(self):
        return "RouterMask(id_sequence=%s, router_id_mask=%s)" % (
            self._id_sequence, hex(self._router_id_mask))


class RouterMaskFactory(object):
    """Builds a RouterMask from a 1-byte sequence number followed by a
    big-endian 64-bit mask."""

    def parse(self, data, message_info):
        id_sequence = ord(data.read(1))
        (router_id_mask,) = struct.unpack(">Q", data.read(8))
        return RouterMask(id_sequence, router_id_mask)
class NdOption(object):
    """Value object for an ND Option TLV: a list of option byte values."""

    def __init__(self, options):
        self._options = options

    @property
    def options(self):
        return self._options

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return other.options == self._options

    def __repr__(self):
        inner = ", ".join(str(opt) for opt in self._options)
        return "NdOption(options=[%s])" % inner


class NdOptionFactory(object):
    """Builds an NdOption from the remaining payload bytes."""

    def parse(self, data, message_info):
        return NdOption(list(bytearray(data.read())))
class NdData(object):
    """Placeholder for the ND Data TLV; parsing is not implemented."""
    # TODO: Not implemented yet
    pass
class NdDataFactory(object):
    """Placeholder factory for the ND Data TLV; always raises."""
    # TODO: Not implemented yet
    def parse(self, data, message_info):
        raise NotImplementedError("TODO: Not implemented yet")
class ThreadNetworkData(object):
    """Value object for the Thread Network Data TLV: its parsed sub-TLVs."""

    def __init__(self, tlvs):
        self._tlvs = tlvs

    @property
    def tlvs(self):
        return self._tlvs

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return other.tlvs == self._tlvs

    def __repr__(self):
        inner = ", ".join(str(tlv) for tlv in self._tlvs)
        return "ThreadNetworkData(tlvs=[%s])" % inner


class ThreadNetworkDataFactory(object):
    """Delegates Network Data parsing to an injected sub-TLV factory."""

    def __init__(self, network_data_tlvs_factory):
        self._network_data_tlvs_factory = network_data_tlvs_factory

    def parse(self, data, message_info):
        sub_tlvs = self._network_data_tlvs_factory.parse(data, message_info)
        return ThreadNetworkData(sub_tlvs)
class NetworkLayerTlvsFactory(object):
    """Splits a buffer of type-length-value records and dispatches each
    value to the factory registered for its type code."""

    def __init__(self, tlvs_factories):
        self._tlvs_factories = tlvs_factories

    def parse(self, data, message_info):
        parsed = []
        end = len(data.getvalue())
        while data.tell() < end:
            tlv_type = ord(data.read(1))
            tlv_length = ord(data.read(1))
            payload = io.BytesIO(data.read(tlv_length))
            factory = self._tlvs_factories[tlv_type]
            parsed.append(factory.parse(payload, message_info))
        return parsed
| bsd-3-clause |
Censio/filterpy | filterpy/common/tests/test_discretization.py | 1 | 2566 | # -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from filterpy.common import linear_ode_discretation, Q_discrete_white_noise
from numpy import array
def near_eq(x, y):
    """True when x and y agree to within an absolute tolerance of 1e-17."""
    tolerance = 1.e-17
    return abs(x - y) < tolerance
def test_Q_discrete_white_noise():
    """Check the dim=2, dt=1 white-noise matrix against its closed form."""
    Q = Q_discrete_white_noise(2)
    expected = ((0.25, 0.5), (0.5, 1.0))
    for i, row in enumerate(expected):
        for j, value in enumerate(row):
            assert Q[i, j] == value
    assert Q.shape == (2, 2)
def test_linear_ode():
    """Discretize a 2-axis constant-jerk state-space model (position,
    velocity, acceleration per axis) and check the transition matrix A
    against the analytic Taylor-series form and Q against its closed form."""
    # Continuous-time dynamics: pos' = vel, vel' = acc (per axis).
    F = array([[0,0,1,0,0,0],
               [0,0,0,1,0,0],
               [0,0,0,0,1,0],
               [0,0,0,0,0,1],
               [0,0,0,0,0,0],
               [0,0,0,0,0,0]], dtype=float)
    # Noise enters only through the acceleration states.
    L = array ([[0,0],
                [0,0],
                [0,0],
                [0,0],
                [1,0],
                [0,1]], dtype=float)
    q = .2
    Q = array([[q, 0],[0, q]])
    dt = 0.5
    A,Q = linear_ode_discretation(F, L, Q, dt)
    # Expected first row of A: [1, 0, dt, 0, dt^2/2, 0]; each following
    # row is the same pattern shifted right by one column.
    val = [1, 0, dt, 0, 0.5*dt**2, 0]
    for i in range(6):
        assert val[i] == A[0,i]
    # NOTE(review): due to precedence these parse as
    # `assert (val[i-1] == A[1,i] if i > 0 else (A[1,i] == 0))`, so for
    # i == 0 they assert the zero entry directly — subtle but correct.
    for i in range(6):
        assert val[i-1] == A[1,i] if i > 0 else A[1,i] == 0
    for i in range(6):
        assert val[i-2] == A[2,i] if i > 1 else A[2,i] == 0
    for i in range(6):
        assert val[i-3] == A[3,i] if i > 2 else A[3,i] == 0
    for i in range(6):
        assert val[i-4] == A[4,i] if i > 3 else A[4,i] == 0
    for i in range(6):
        assert val[i-5] == A[5,i] if i > 4 else A[5,i] == 0
    # Closed-form entries of the discretized process noise (first row).
    assert near_eq(Q[0,0], (1./20)*(dt**5)*q)
    assert near_eq(Q[0,1], 0)
    assert near_eq(Q[0,2], (1/8)*(dt**4)*q)
    assert near_eq(Q[0,3], 0)
    assert near_eq(Q[0,4], (1./6)*(dt**3)*q)
    assert near_eq(Q[0,5], 0)
if __name__ == "__main__":
    # Run both tests, then print the discretized process noise for a
    # 30 Hz update rate as a quick visual sanity check.
    test_linear_ode()
    test_Q_discrete_white_noise()
    F = array([[0,0,1,0,0,0],
               [0,0,0,1,0,0],
               [0,0,0,0,1,0],
               [0,0,0,0,0,1],
               [0,0,0,0,0,0],
               [0,0,0,0,0,0]], dtype=float)
    L = array ([[0,0],
                [0,0],
                [0,0],
                [0,0],
                [1,0],
                [0,1]], dtype=float)
    q = .2
    Q = array([[q, 0],[0, q]])
    # True division: the file imports division from __future__, so this
    # is 0.0333... even on Python 2.
    dt = 1/30
    A,Q = linear_ode_discretation(F, L, Q, dt)
print(Q) | mit |
boada/planckClusters | MOSAICpipe/bpz-1.99.3/prior_full.py | 1 | 3446 | from __future__ import print_function
from __future__ import division
from past.utils import old_div
from useful import match_resol
import numpy
import sys
# Hacked to use numpy and avoid import * commands
# FM
# `numpy.float` was a deprecated alias of the builtin `float` and was
# removed in NumPy 1.24; referencing it raises AttributeError at import
# time on modern NumPy.  Use the builtin directly — same type, so
# callers passing Float as a dtype see identical behavior.
Float = float
less = numpy.less  # module-level alias kept; used by function() below
def function(z, m, nt):
    """HDFN prior for the main six types of Benitez 2000
    Returns an array pi[z[:],:6]
    The input magnitude is F814W AB
    """
    # Exactly six template spectra are supported (3 early + 3 late types).
    if nt != 6:
        print("Wrong number of template spectra!")
        sys.exit()
    # NOTE(review): the globals declared here are zt_at_a / zt_at_1p5 /
    # zt_at_2, but the bright branch below caches into names zt_2 and
    # zt_1p5, which are NOT in this list — so those caches are plain
    # locals and are recomputed on every call.  Looks like a latent bug;
    # behavior is still correct, just slower.  TODO confirm and fix.
    global zt_at_a
    global zt_at_1p5
    global zt_at_2
    nz = len(z)
    momin_hdf = 20.  # magnitude pivot between the bright/faint priors
    if m <= 20.:
        # Bright branch: prior calibrated on 12 <= m <= 18 and
        # extrapolated flat outside that range via the clip below.
        xm = numpy.arange(12., 18.0)
        # Fixed morphological fractions for bright galaxies
        # (E/S0, 2x spirals, 3x irregulars).
        ft = numpy.array((0.55, 0.21, 0.21, .01, .01, .01))
        zm0 = numpy.array([0.021, 0.034, 0.056, 0.0845, 0.1155, 0.127]) * (
            old_div(2., 3.))
        if len(ft) != nt:
            print("Wrong number of templates!")
            sys.exit()
        nz = len(z)
        m = numpy.array([m])  # match_resol works with arrays
        m = numpy.clip(m, xm[0], xm[-1])
        # Interpolate the characteristic redshift zm at this magnitude.
        zm = match_resol(xm, zm0, m)
        # Cache z**2 across calls (but see the globals note above: this
        # NameError trick never persists because zt_2 is local here).
        try:
            zt_2.shape
        except NameError:
            t2 = [2.] * nt
            zt_2 = numpy.power.outer(z, t2)
        try:
            zt_1p5.shape
        except NameError:
            t1p5 = [1.5] * nt
            zt_1p5 = numpy.power.outer(z, t1p5)
        zm_3 = numpy.power.outer(zm, 3)
        zm_1p5 = numpy.power.outer(zm, 1.5)
        # p(z|m) = (3/2) z^2 / zm^3 * exp(-(z/zm)^1.5); the clip keeps
        # the exponent below 700 to avoid overflow in exp.
        p_i = 3. / 2. / zm_3 * zt_2[:, :] * numpy.exp(-numpy.clip(
            old_div(zt_1p5[:, :], zm_1p5), 0., 700.))
        norm = numpy.add.reduce(p_i[:nz, :], 0)
        #Get rid of very low probability levels
        p_i[:nz, :] = numpy.where(
            numpy.less(
                old_div(p_i[:nz, :], norm[:]), old_div(1e-5, float(nz))), 0.,
            old_div(p_i[:nz, :], norm[:]))
        norm = numpy.add.reduce(p_i[:nz, :], 0)
        # Normalize per type and weight by the morphological fractions.
        return p_i[:nz, :] / norm[:] * ft[:]
    else:
        # Faint branch: Benitez (2000) HDF-N prior, valid 20 <= m <= 32.
        m = numpy.minimum(numpy.maximum(20., m), 32)
        # Per-type shape (a), zero-point (zo) and magnitude slope (km)
        # of the characteristic redshift; values from Benitez 2000.
        a = numpy.array((2.465, 1.806, 1.806, 0.906, 0.906, 0.906))
        zo = numpy.array((0.431, 0.390, 0.390, 0.0626, 0.0626, 0.0626))
        km = numpy.array((0.0913, 0.0636, 0.0636, 0.123, 0.123, 0.123))
        fo_t = numpy.array((0.35, 0.25, 0.25))
        k_t = numpy.array((0.450, 0.147, 0.147))
        dm = m - momin_hdf
        zmt = numpy.clip(zo + km * dm, 0.01, 15.)
        zmt_at_a = zmt**(a)
        #We define z**a as global to keep it
        #between function calls. That way it is
        # estimated only once
        try:
            zt_at_a.shape
        except NameError:
            zt_at_a = numpy.power.outer(z, a)
        #Morphological fractions
        f_t = numpy.zeros((len(a), ), Float)
        f_t[:3] = fo_t * numpy.exp(-k_t * dm)
        # Remaining probability shared equally among the 3 late types.
        f_t[3:] = old_div((1. - numpy.add.reduce(f_t[:3])), 3.)
        #Formula:
        #zm=zo+km*(m_m_min)
        #p(z|T,m)=(z**a)*numpy.exp(-(z/zm)**a)
        p_i = zt_at_a[:nz, :6] * numpy.exp(-numpy.clip(
            old_div(zt_at_a[:nz, :6], zmt_at_a[:6]), 0., 700.))
        #This eliminates the very low level tails of the priors
        norm = numpy.add.reduce(p_i[:nz, :6], 0)
        p_i[:nz, :6] = numpy.where(
            less(
                old_div(p_i[:nz, :6], norm[:6]), old_div(1e-2, float(nz))), 0.,
            old_div(p_i[:nz, :6], norm[:6]))
        norm = numpy.add.reduce(p_i[:nz, :6], 0)
        p_i[:nz, :6] = p_i[:nz, :6] / norm[:6] * f_t[:6]
        return p_i
| mit |
Lucifer-Kim/scrapy | extras/qps-bench-server.py | 178 | 1640 | #!/usr/bin/env python
from __future__ import print_function
from time import time
from collections import deque
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor
class Root(Resource):
    """Twisted web resource that measures client request throughput.

    Each request appends the inter-request interval to a sliding window
    (``tail``, last 100 samples); roughly every 3 seconds the observed
    QPS and the number of in-flight requests are printed.  A
    ``latency=<seconds>`` query argument delays the response to simulate
    a slow server.
    """
    def __init__(self):
        Resource.__init__(self)
        self.concurrent = 0  # requests currently being served
        self.tail = deque(maxlen=100)  # recent inter-request intervals
        self._reset_stats()

    def _reset_stats(self):
        """Forget the sample window and restart the timers."""
        self.tail.clear()
        self.start = self.lastmark = self.lasttime = time()

    def getChild(self, name, request):
        # Every path is served by this same resource.  Note: Twisted's
        # IResource.getChild signature is (name, request); the original
        # code had the two parameter names swapped (both were unused, so
        # behavior is unchanged).
        return self

    def render(self, request):
        now = time()
        delta = now - self.lasttime
        # reset stats on high inter-request times caused by client restarts
        if delta > 3: # seconds
            self._reset_stats()
            return ''
        self.tail.appendleft(delta)
        self.lasttime = now
        self.concurrent += 1
        if now - self.lastmark >= 3:
            self.lastmark = now
            # QPS over the window = samples / total elapsed in window.
            qps = len(self.tail) / sum(self.tail)
            print('samplesize={0} concurrent={1} qps={2:0.2f}'.format(len(self.tail), self.concurrent, qps))
        if 'latency' in request.args:
            # Defer completion to simulate server-side latency.
            latency = float(request.args['latency'][0])
            reactor.callLater(latency, self._finish, request)
            return NOT_DONE_YET
        self.concurrent -= 1
        return ''

    def _finish(self, request):
        """Deferred completion path for artificially delayed responses."""
        self.concurrent -= 1
        if not request.finished and not request._disconnected:
            request.finish()
root = Root()
factory = Site(root)
# Serve the benchmark resource on all interfaces, port 8880, and block
# in the reactor loop until interrupted.
reactor.listenTCP(8880, factory)
reactor.run()
| bsd-3-clause |
yajiedesign/p2pool | p2pool/bitcoin/data.py | 185 | 10220 | from __future__ import division
import hashlib
import random
import warnings
import p2pool
from p2pool.util import math, pack
def hash256(data):
    # Bitcoin's double SHA-256 of `data`, decoded as a 256-bit integer.
    return pack.IntType(256).unpack(hashlib.sha256(hashlib.sha256(data).digest()).digest())
def hash160(data):
    # Bitcoin's HASH160: RIPEMD-160 of SHA-256, as a 160-bit integer.
    # The special case below hard-codes the result for one particular
    # pubkey so hosts whose hashlib lacks ripemd160 still work; per the
    # original author this is the only value p2pool ever HASH160s.
    if data == '04ffd03de44a6e11b9917f3a29f9443283d9871c9d743ef30d5eddcd37094b64d1b3d8090496b53256786bf5c82932ec23c3b74d9f05a6f95a8b5529352656664b'.decode('hex'):
        return 0x384f570ccc88ac2e7e00b026d1690a3fca63dd0 # hack for people who don't have openssl - this is the only value that p2pool ever hashes
    return pack.IntType(160).unpack(hashlib.new('ripemd160', hashlib.sha256(data).digest()).digest())
class ChecksummedType(pack.Type):
    """pack.Type wrapper that appends and verifies a checksum over the
    inner type's serialization.  The default checksum is the first 4
    bytes of double SHA-256, as used by Bitcoin's base58check."""
    def __init__(self, inner, checksum_func=lambda data: hashlib.sha256(hashlib.sha256(data).digest()).digest()[:4]):
        self.inner = inner
        self.checksum_func = checksum_func
    def read(self, file):
        obj, file = self.inner.read(file)
        # Re-pack the parsed object to recover the exact byte string the
        # checksum was computed over.
        data = self.inner.pack(obj)
        calculated_checksum = self.checksum_func(data)
        checksum, file = pack.read(file, len(calculated_checksum))
        if checksum != calculated_checksum:
            raise ValueError('invalid checksum')
        return obj, file
    def write(self, file, item):
        # Emit the inner serialization followed by its checksum.
        data = self.inner.pack(item)
        return (file, data), self.checksum_func(data)
class FloatingInteger(object):
    """Bitcoin's 'compact bits' (nBits) target encoding: a 32-bit value
    whose high byte is a size/exponent and low three bytes a mantissa,
    representing target = mantissa << 8*(size-3)."""
    __slots__ = ['bits', '_target']
    @classmethod
    def from_target_upper_bound(cls, target):
        # natural_to_string yields the big-endian magnitude bytes; a zero
        # byte is prepended when the top bit is set so the encoded value
        # is not interpreted as negative.
        n = math.natural_to_string(target)
        if n and ord(n[0]) >= 128:
            n = '\x00' + n
        # size byte + first three mantissa bytes, then reversed into the
        # little-endian layout IntType(32) expects.
        bits2 = (chr(len(n)) + (n + 3*chr(0))[:3])[::-1]
        bits = pack.IntType(32).unpack(bits2)
        return cls(bits)
    def __init__(self, bits, target=None):
        self.bits = bits
        self._target = None  # lazy cache for the decoded target
        if target is not None and self.target != target:
            raise ValueError('target does not match')
    @property
    def target(self):
        # Decode mantissa << 8*(size-3), computing at most once.
        res = self._target
        if res is None:
            res = self._target = math.shift_left(self.bits & 0x00ffffff, 8 * ((self.bits >> 24) - 3))
        return res
    def __hash__(self):
        return hash(self.bits)
    def __eq__(self, other):
        return self.bits == other.bits
    def __ne__(self, other):
        return not (self == other)
    def __cmp__(self, other):
        # Ordering is deliberately unsupported; compare targets instead.
        assert False
    def __repr__(self):
        return 'FloatingInteger(bits=%s, target=%s)' % (hex(self.bits), hex(self.target))
class FloatingIntegerType(pack.Type):
    """Serializes a FloatingInteger as its raw 32-bit 'bits' field."""
    _inner = pack.IntType(32)
    def read(self, file):
        bits, file = self._inner.read(file)
        return FloatingInteger(bits), file
    def write(self, file, item):
        return self._inner.write(file, item.bits)
# Bitcoin P2P wire-format type declarations (serialized with pack).
# Network address as used inside addr/version messages.
address_type = pack.ComposedType([
    ('services', pack.IntType(64)),
    ('address', pack.IPV6AddressType()),
    ('port', pack.IntType(16, 'big')),
])
# Transaction: inputs reference previous outputs; a coinbase input has
# previous_output None (hash 0, index 0xffffffff) and sequence None.
tx_type = pack.ComposedType([
    ('version', pack.IntType(32)),
    ('tx_ins', pack.ListType(pack.ComposedType([
        ('previous_output', pack.PossiblyNoneType(dict(hash=0, index=2**32 - 1), pack.ComposedType([
            ('hash', pack.IntType(256)),
            ('index', pack.IntType(32)),
        ]))),
        ('script', pack.VarStrType()),
        ('sequence', pack.PossiblyNoneType(2**32 - 1, pack.IntType(32))),
    ]))),
    ('tx_outs', pack.ListType(pack.ComposedType([
        ('value', pack.IntType(64)),
        ('script', pack.VarStrType()),
    ]))),
    ('lock_time', pack.IntType(32)),
])
# Merkle branch plus the leaf's position within the tree.
merkle_link_type = pack.ComposedType([
    ('branch', pack.ListType(pack.IntType(256))),
    ('index', pack.IntType(32)),
])
# Transaction together with a proof of inclusion in a block.
merkle_tx_type = pack.ComposedType([
    ('tx', tx_type),
    ('block_hash', pack.IntType(256)),
    ('merkle_link', merkle_link_type),
])
# 80-byte block header; previous_block None encodes the genesis case.
block_header_type = pack.ComposedType([
    ('version', pack.IntType(32)),
    ('previous_block', pack.PossiblyNoneType(0, pack.IntType(256))),
    ('merkle_root', pack.IntType(256)),
    ('timestamp', pack.IntType(32)),
    ('bits', FloatingIntegerType()),
    ('nonce', pack.IntType(32)),
])
block_type = pack.ComposedType([
    ('header', block_header_type),
    ('txs', pack.ListType(tx_type)),
])
# merged mining
# Proof that a parent-chain block commits to an aux chain's hash.
aux_pow_type = pack.ComposedType([
    ('merkle_tx', merkle_tx_type),
    ('merkle_link', merkle_link_type),
    ('parent_block_header', block_header_type),
])
# Commitment embedded in the parent coinbase for merged mining.
aux_pow_coinbase_type = pack.ComposedType([
    ('merkle_root', pack.IntType(256, 'big')),
    ('size', pack.IntType(32)),
    ('nonce', pack.IntType(32)),
])
def make_auxpow_tree(chain_ids):
    """Find the smallest power-of-two tree size (up to 2**30) for which
    every chain id maps to a distinct slot under the merged-mining hash
    position formula.  Returns (slot -> chain_id mapping, size)."""
    size = 1
    while size <= 2**30:
        if size >= len(chain_ids):
            slots = {}
            collision = False
            for chain_id in chain_ids:
                # Position formula from the merged-mining specification.
                pos = (1103515245 * chain_id + 1103515245 * 12345 + 12345) % size
                if pos in slots:
                    collision = True
                    break
                slots[pos] = chain_id
            if not collision:
                return slots, size
        size *= 2
    raise AssertionError()
# merkle trees
# One interior node: the two child hashes that are concatenated and
# double-SHA256'd to produce the parent hash.
merkle_record_type = pack.ComposedType([
    ('left', pack.IntType(256)),
    ('right', pack.IntType(256)),
])
def merkle_hash(hashes):
    """Root of the Bitcoin-style merkle tree over `hashes` (0 if empty).
    An odd level is padded by duplicating its last element."""
    if not hashes:
        return 0
    level = list(hashes)
    while len(level) > 1:
        if len(level) % 2:
            level.append(level[-1])
        level = [hash256(merkle_record_type.pack(dict(left=l, right=r)))
            for l, r in zip(level[::2], level[1::2])]
    return level[0]
def calculate_merkle_link(hashes, index):
    """Build the merkle branch ('link') proving hashes[index] against the
    merkle root of `hashes`.  Entries of `hashes` may be None except at
    the positions the branch actually needs to hash (thunks are lazy)."""
    # XXX optimize this
    # Each list entry is (lazy_hash_thunk, subtree_contains_target, branch_so_far).
    hash_list = [(lambda _h=h: _h, i == index, []) for i, h in enumerate(hashes)]
    while len(hash_list) > 1:
        # Pair adjacent nodes; an odd level reuses its last even-index
        # node as padding.  The sibling on the target's path is appended
        # to the branch with its side (0 = left, 1 = right).
        hash_list = [
            (
                lambda _left=left, _right=right: hash256(merkle_record_type.pack(dict(left=_left(), right=_right()))),
                left_f or right_f,
                (left_l if left_f else right_l) + [dict(side=1, hash=right) if left_f else dict(side=0, hash=left)],
            )
            for (left, left_f, left_l), (right, right_f, right_l) in
                zip(hash_list[::2], hash_list[1::2] + [hash_list[::2][-1]])
        ]
    # Force the recorded sibling thunks into concrete hash values.
    res = [x['hash']() for x in hash_list[0][2]]
    assert hash_list[0][1]
    if p2pool.DEBUG:
        # Cross-check the link against a full recomputation with the
        # None placeholders replaced by random values.
        new_hashes = [random.randrange(2**256) if x is None else x
            for x in hashes]
        assert check_merkle_link(new_hashes[index], dict(branch=res, index=index)) == merkle_hash(new_hashes)
        assert index == sum(k*2**i for i, k in enumerate([1-x['side'] for x in hash_list[0][2]]))
    return dict(branch=res, index=index)
def check_merkle_link(tip_hash, link):
    """Recompute the merkle root implied by `tip_hash` and `link`.
    Bit i of link['index'] selects which side the leaf's chain is on at
    level i (1 means the branch hash goes on the left)."""
    if link['index'] >= 2**len(link['branch']):
        raise ValueError('index too large')
    cur = tip_hash
    for i, h in enumerate(link['branch']):
        if (link['index'] >> i) & 1:
            cur = hash256(merkle_record_type.pack(dict(left=h, right=cur)))
        else:
            cur = hash256(merkle_record_type.pack(dict(left=cur, right=h)))
    return cur
# targets
def target_to_average_attempts(target):
    # Expected number of hash attempts to find a value <= target:
    # 2**256 // (target + 1).
    assert 0 <= target and isinstance(target, (int, long)), target
    if target >= 2**256: warnings.warn('target >= 2**256!')
    return 2**256//(target + 1)
def average_attempts_to_target(average_attempts):
    """Inverse of target_to_average_attempts, rounded to the nearest
    integer and clamped to the maximum representable target."""
    assert average_attempts > 0
    raw = 2**256 / average_attempts - 1 + 0.5
    return min(int(raw), 2**256 - 1)
def target_to_difficulty(target):
    # Bitcoin difficulty relative to the difficulty-1 target
    # (0xffff << 208): diff1_target_bound / (target + 1).
    assert 0 <= target and isinstance(target, (int, long)), target
    if target >= 2**256: warnings.warn('target >= 2**256!')
    return (0xffff0000 * 2**(256-64) + 1)/(target + 1)
def difficulty_to_target(difficulty):
    """Inverse of target_to_difficulty; difficulty 0 maps to the widest
    possible target (2**256 - 1)."""
    assert difficulty >= 0
    if difficulty == 0:
        return 2**256 - 1
    raw = (0xffff0000 * 2**(256 - 64) + 1) / difficulty - 1 + 0.5
    return min(int(raw), 2**256 - 1)
# human addresses
# Bitcoin base58 alphabet: omits 0, O, I and l to avoid confusion.
base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def base58_encode(bindata):
    # Leading zero bytes encode as leading '1' characters; the remainder
    # is the base-58 representation of the big-endian integer value.
    bindata2 = bindata.lstrip(chr(0))
    return base58_alphabet[0]*(len(bindata) - len(bindata2)) + math.natural_to_string(math.string_to_natural(bindata2), base58_alphabet)
def base58_decode(b58data):
    # Inverse of base58_encode: leading '1' characters become zero bytes.
    b58data2 = b58data.lstrip(base58_alphabet[0])
    return chr(0)*(len(b58data) - len(b58data2)) + math.natural_to_string(math.string_to_natural(b58data2, base58_alphabet))
# Payload of a base58check address: one network version byte plus the
# 160-bit pubkey hash, guarded by a 4-byte double-SHA256 checksum.
human_address_type = ChecksummedType(pack.ComposedType([
    ('version', pack.IntType(8)),
    ('pubkey_hash', pack.IntType(160)),
]))
def pubkey_hash_to_address(pubkey_hash, net):
    # Base58check-encode the hash under the network's version byte.
    return base58_encode(human_address_type.pack(dict(version=net.ADDRESS_VERSION, pubkey_hash=pubkey_hash)))
def pubkey_to_address(pubkey, net):
    # Convenience: HASH160 the pubkey, then encode as an address.
    return pubkey_hash_to_address(hash160(pubkey), net)
def address_to_pubkey_hash(address, net):
    # Decode (checksum-verified by human_address_type) and reject
    # addresses that belong to a different network.
    x = human_address_type.unpack(base58_decode(address))
    if x['version'] != net.ADDRESS_VERSION:
        raise ValueError('address not for this net!')
    return x['pubkey_hash']
# transactions
def pubkey_to_script2(pubkey):
    """Pay-to-pubkey scriptPubKey: PUSH(pubkey) OP_CHECKSIG."""
    assert len(pubkey) <= 75  # must fit a single direct-push opcode
    push = chr(len(pubkey)) + pubkey
    return push + '\xac'
def pubkey_hash_to_script2(pubkey_hash):
    """Pay-to-pubkey-hash scriptPubKey:
    OP_DUP OP_HASH160 PUSH(hash160) OP_EQUALVERIFY OP_CHECKSIG."""
    push = '\x14' + pack.IntType(160).pack(pubkey_hash)
    return '\x76\xa9' + push + '\x88\xac'
def script2_to_address(script2, net):
    """Best-effort reverse of the script builders above: return the
    human-readable address if script2 is a canonical pay-to-pubkey or
    pay-to-pubkey-hash script, otherwise fall through returning None.
    The recognition works by re-encoding the extracted candidate and
    comparing against the original; the bare excepts deliberately treat
    any malformed candidate as 'not this script form'."""
    try:
        pubkey = script2[1:-1]
        script2_test = pubkey_to_script2(pubkey)
    except:
        pass
    else:
        if script2_test == script2:
            return pubkey_to_address(pubkey, net)
    try:
        pubkey_hash = pack.IntType(160).unpack(script2[3:-2])
        script2_test2 = pubkey_hash_to_script2(pubkey_hash)
    except:
        pass
    else:
        if script2_test2 == script2:
            return pubkey_hash_to_address(pubkey_hash, net)
def script2_to_human(script2, net):
    """Describe *script2* for display: its type and address when it is a
    standard pay-to-pubkey or pay-to-pubkey-hash script, otherwise the raw
    hex of the script.
    """
    # Pay-to-pubkey: PUSH(pubkey) OP_CHECKSIG, verified by round-tripping.
    try:
        pubkey = script2[1:-1]
        script2_test = pubkey_to_script2(pubkey)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        pass
    else:
        if script2_test == script2:
            return 'Pubkey. Address: %s' % (pubkey_to_address(pubkey, net),)
    # Pay-to-pubkey-hash, also verified by round-tripping.
    try:
        pubkey_hash = pack.IntType(160).unpack(script2[3:-2])
        script2_test2 = pubkey_hash_to_script2(pubkey_hash)
    except Exception:
        pass
    else:
        if script2_test2 == script2:
            return 'Address. Address: %s' % (pubkey_hash_to_address(pubkey_hash, net),)
    return 'Unknown. Script: %s' % (script2.encode('hex'),)
| gpl-3.0 |
miipl-naveen/optibizz | addons/warning/warning.py | 243 | 11529 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
# Possible behaviours when a flagged partner/product is selected on a document:
# silently continue, show a non-blocking warning, or block the operation.
WARNING_MESSAGE = [
    ('no-message','No Message'),
    ('warning','Warning'),
    ('block','Blocking Message')
]
# Shared help text for every *_warn selection field defined below.
WARNING_HELP = _('Selecting the "Warning" option will notify user with the message, Selecting "Blocking Message" will throw an exception with the message and block the flow. The Message has to be written in the next field.')
class res_partner(osv.osv):
    """Add per-document warning configuration to partners.

    For each document type (sales order, purchase order, stock picking,
    invoice) the partner stores a warning mode (see WARNING_MESSAGE) plus the
    message text to display when the mode is 'warning' or 'block'.
    """
    _inherit = 'res.partner'
    _columns = {
        'sale_warn' : fields.selection(WARNING_MESSAGE, 'Sales Order', help=WARNING_HELP, required=True),
        'sale_warn_msg' : fields.text('Message for Sales Order'),
        'purchase_warn' : fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True),
        'purchase_warn_msg' : fields.text('Message for Purchase Order'),
        'picking_warn' : fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True),
        'picking_warn_msg' : fields.text('Message for Stock Picking'),
        'invoice_warn' : fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True),
        'invoice_warn_msg' : fields.text('Message for Invoice'),
    }
    # All warnings are disabled by default.
    _defaults = {
         'sale_warn' : 'no-message',
         'purchase_warn' : 'no-message',
         'picking_warn' : 'no-message',
         'invoice_warn' : 'no-message',
    }
class sale_order(osv.osv):
    """Extend sale.order to surface the partner-level sales warning when the
    customer is changed on a sales order."""
    _inherit = 'sale.order'

    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        # No partner selected: clear the dependent fields.
        if not part:
            return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False}}

        partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
        title = False
        message = False
        warning = {}
        if partner.sale_warn != 'no-message':
            title = _("Warning for %s") % partner.name
            message = partner.sale_warn_msg
            warning = {
                'title': title,
                'message': message,
            }
            # Blocking mode: reset the partner and stop here.
            if partner.sale_warn == 'block':
                return {'value': {'partner_id': False}, 'warning': warning}

        result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
        inherited = result.get('warning')
        if inherited:
            # Merge our warning with the one produced by the parent class.
            warning['title'] = title and title + ' & ' + inherited['title'] or inherited['title']
            warning['message'] = message and message + ' ' + inherited['message'] or inherited['message']
        if warning:
            result['warning'] = warning
        return result
class purchase_order(osv.osv):
    """Extend purchase.order to surface the supplier-level purchase warning
    when the partner is changed on a purchase order."""
    _inherit = 'purchase.order'

    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        # No partner selected: clear the dependent address field.
        if not part:
            return {'value': {'partner_address_id': False}}

        partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
        title = False
        message = False
        warning = {}
        if partner.purchase_warn != 'no-message':
            title = _("Warning for %s") % partner.name
            message = partner.purchase_warn_msg
            warning = {
                'title': title,
                'message': message
            }
            # Blocking mode: reset the partner and stop here.
            if partner.purchase_warn == 'block':
                return {'value': {'partner_id': False}, 'warning': warning}

        result = super(purchase_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
        inherited = result.get('warning')
        if inherited:
            # Merge our warning with the one produced by the parent class.
            warning['title'] = title and title + ' & ' + inherited['title'] or inherited['title']
            warning['message'] = message and message + ' ' + inherited['message'] or inherited['message']
        if warning:
            result['warning'] = warning
        return result
class account_invoice(osv.osv):
    """Extend account.invoice to surface the partner-level invoice warning
    when the partner is changed on an invoice."""
    _inherit = 'account.invoice'

    def onchange_partner_id(self, cr, uid, ids, type, partner_id,
                            date_invoice=False, payment_term=False,
                            partner_bank_id=False, company_id=False,
                            context=None):
        # No partner selected: clear the dependent fields.
        if not partner_id:
            return {'value': {'account_id': False, 'payment_term': False}}

        partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
        title = False
        message = False
        warning = {}
        if partner.invoice_warn != 'no-message':
            title = _("Warning for %s") % partner.name
            message = partner.invoice_warn_msg
            warning = {
                'title': title,
                'message': message
            }
            # Blocking mode: reset the partner and stop here.
            if partner.invoice_warn == 'block':
                return {'value': {'partner_id': False}, 'warning': warning}

        result = super(account_invoice, self).onchange_partner_id(
            cr, uid, ids, type, partner_id,
            date_invoice=date_invoice, payment_term=payment_term,
            partner_bank_id=partner_bank_id, company_id=company_id, context=context)
        inherited = result.get('warning')
        if inherited:
            # Merge our warning with the one produced by the parent class.
            warning['title'] = title and title + ' & ' + inherited['title'] or inherited['title']
            warning['message'] = message and message + ' ' + inherited['message'] or inherited['message']
        if warning:
            result['warning'] = warning
        return result
class stock_picking(osv.osv):
    """Extend stock.picking to surface the partner-level picking warning when
    the partner is changed on a picking."""
    _inherit = 'stock.picking'

    def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
        if not partner_id:
            return {}
        partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
        title = False
        message = False
        warning = {}
        if partner.picking_warn != 'no-message':
            title = _("Warning for %s") % partner.name
            message = partner.picking_warn_msg
            warning = {
                'title': title,
                'message': message
            }
            # Blocking mode: reset the partner and stop here.
            if partner.picking_warn == 'block':
                return {'value': {'partner_id': False}, 'warning': warning}
        # No parent onchange to merge with here; just attach our warning.
        result = {'value': {}}
        if warning:
            result['warning'] = warning
        return result
class product_product(osv.osv):
    """Add per-document-line warning configuration to product templates.

    NOTE(review): the class is named product_product but inherits
    'product.template' — the fields end up on the template model.
    """
    _inherit = 'product.template'
    _columns = {
         'sale_line_warn' : fields.selection(WARNING_MESSAGE,'Sales Order Line', help=WARNING_HELP, required=True),
         'sale_line_warn_msg' : fields.text('Message for Sales Order Line'),
         'purchase_line_warn' : fields.selection(WARNING_MESSAGE,'Purchase Order Line', help=WARNING_HELP, required=True),
         'purchase_line_warn_msg' : fields.text('Message for Purchase Order Line'),
     }
    # Line-level warnings are disabled by default.
    _defaults = {
         'sale_line_warn' : 'no-message',
         'purchase_line_warn' : 'no-message',
    }
class sale_order_line(osv.osv):
    """Extend sale.order.line to surface the product-level sales warning when
    the product is changed on an order line."""
    _inherit = 'sale.order.line'

    def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
            uom=False, qty_uos=0, uos=False, name='', partner_id=False,
            lang=False, update_tax=True, date_order=False, packaging=False,
            fiscal_position=False, flag=False, warehouse_id=False, context=None):
        # No product selected: reset the dependent values and domains.
        if not product:
            return {'value': {'th_weight': 0, 'product_packaging': False,
                'product_uos_qty': qty}, 'domain': {'product_uom': [],
                'product_uos': []}}

        product_info = self.pool.get('product.product').browse(cr, uid, product)
        title = False
        message = False
        warning = {}
        if product_info.sale_line_warn != 'no-message':
            title = _("Warning for %s") % product_info.name
            message = product_info.sale_line_warn_msg
            warning['title'] = title
            warning['message'] = message
            # Blocking mode: reset the product and stop here.
            if product_info.sale_line_warn == 'block':
                return {'value': {'product_id': False}, 'warning': warning}

        result = super(sale_order_line, self).product_id_change_with_wh(cr, uid, ids, pricelist, product, qty,
            uom, qty_uos, uos, name, partner_id,
            lang, update_tax, date_order, packaging, fiscal_position, flag, warehouse_id=warehouse_id, context=context)
        inherited = result.get('warning')
        if inherited:
            # Merge our warning with the one produced by the parent class.
            warning['title'] = title and title + ' & ' + inherited['title'] or inherited['title']
            warning['message'] = message and message + '\n\n' + inherited['message'] or inherited['message']
        if warning:
            result['warning'] = warning
        return result
class purchase_order_line(osv.osv):
    """Extend purchase.order.line to surface the product-level purchase
    warning when the product is changed on a purchase order line."""
    _inherit = 'purchase.order.line'

    def onchange_product_id(self, cr, uid, ids, pricelist, product, qty, uom,
            partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
            name=False, price_unit=False, state='draft', context=None):
        # No product selected: keep the current values and reset the uom domain.
        if not product:
            return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom': uom or False},
                'domain': {'product_uom': []}}

        product_info = self.pool.get('product.product').browse(cr, uid, product)
        title = False
        message = False
        warning = {}
        if product_info.purchase_line_warn != 'no-message':
            title = _("Warning for %s") % product_info.name
            message = product_info.purchase_line_warn_msg
            warning['title'] = title
            warning['message'] = message
            # Blocking mode: reset the product and stop here.
            if product_info.purchase_line_warn == 'block':
                return {'value': {'product_id': False}, 'warning': warning}

        result = super(purchase_order_line, self).onchange_product_id(cr, uid, ids, pricelist, product, qty, uom,
            partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
            name=name, price_unit=price_unit, state=state, context=context)
        inherited = result.get('warning')
        if inherited:
            # Merge our warning with the one produced by the parent class.
            warning['title'] = title and title + ' & ' + inherited['title'] or inherited['title']
            warning['message'] = message and message + '\n\n' + inherited['message'] or inherited['message']
        if warning:
            result['warning'] = warning
        return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
brandond/ansible | lib/ansible/modules/network/avi/avi_analyticsprofile.py | 29 | 29145 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_analyticsprofile
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of AnalyticsProfile Avi RESTful Object
description:
- This module is used to configure AnalyticsProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
apdex_response_threshold:
description:
- If a client receives an http response in less than the satisfactory latency threshold, the request is considered satisfied.
- It is considered tolerated if it is not satisfied and less than tolerated latency factor multiplied by the satisfactory latency threshold.
- Greater than this number and the client's request is considered frustrated.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 500.
- Units(MILLISECONDS).
apdex_response_tolerated_factor:
description:
- Client tolerated response latency factor.
- Client must receive a response within this factor times the satisfactory threshold (apdex_response_threshold) to be considered tolerated.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_rtt_threshold:
description:
- Satisfactory client to avi round trip time(rtt).
- Allowed values are 1-2000.
- Default value when not specified in API or module is interpreted by Avi Controller as 250.
- Units(MILLISECONDS).
apdex_rtt_tolerated_factor:
description:
- Tolerated client to avi round trip time(rtt) factor.
- It is a multiple of apdex_rtt_tolerated_factor.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_rum_threshold:
description:
- If a client is able to load a page in less than the satisfactory latency threshold, the pageload is considered satisfied.
- It is considered tolerated if it is greater than satisfied but less than the tolerated latency multiplied by satisifed latency.
- Greater than this number and the client's request is considered frustrated.
- A pageload includes the time for dns lookup, download of all http objects, and page render time.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 5000.
- Units(MILLISECONDS).
apdex_rum_tolerated_factor:
description:
- Virtual service threshold factor for tolerated page load time (plt) as multiple of apdex_rum_threshold.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_server_response_threshold:
description:
- A server http response is considered satisfied if latency is less than the satisfactory latency threshold.
- The response is considered tolerated when it is greater than satisfied but less than the tolerated latency factor * s_latency.
- Greater than this number and the server response is considered frustrated.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 400.
- Units(MILLISECONDS).
apdex_server_response_tolerated_factor:
description:
- Server tolerated response latency factor.
            - Server must respond within this factor times the satisfactory threshold (apdex_server_response_threshold) to be considered tolerated.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_server_rtt_threshold:
description:
- Satisfactory client to avi round trip time(rtt).
- Allowed values are 1-2000.
- Default value when not specified in API or module is interpreted by Avi Controller as 125.
- Units(MILLISECONDS).
apdex_server_rtt_tolerated_factor:
description:
- Tolerated client to avi round trip time(rtt) factor.
- It is a multiple of apdex_rtt_tolerated_factor.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
client_log_config:
description:
- Configure which logs are sent to the avi controller from ses and how they are processed.
client_log_streaming_config:
description:
- Configure to stream logs to an external server.
- Field introduced in 17.1.1.
version_added: "2.4"
conn_lossy_ooo_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of out of order packets are received.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
conn_lossy_timeo_rexmt_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted due to timeout.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
- Units(PERCENT).
conn_lossy_total_rexmt_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
conn_lossy_zero_win_size_event_threshold:
description:
            - A client connection is considered lossy when percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
- Units(PERCENT).
conn_server_lossy_ooo_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of out of order packets are received.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
conn_server_lossy_timeo_rexmt_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted due to timeout.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
- Units(PERCENT).
conn_server_lossy_total_rexmt_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
conn_server_lossy_zero_win_size_event_threshold:
description:
            - A server connection is considered lossy when percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
- Units(PERCENT).
description:
description:
- User defined description for the object.
disable_se_analytics:
description:
            - Disable node (service engine) level analytics for vs metrics.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
disable_server_analytics:
description:
- Disable analytics on backend servers.
- This may be desired in container environment when there are large number of ephemeral servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_client_close_before_request_as_error:
description:
- Exclude client closed connection before an http request could be completed from being classified as an error.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_dns_policy_drop_as_significant:
description:
- Exclude dns policy drops from the list of errors.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
exclude_gs_down_as_error:
description:
- Exclude queries to gslb services that are operationally down from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_http_error_codes:
description:
- List of http status codes to be excluded from being classified as an error.
- Error connections or responses impacts health score, are included as significant logs, and may be classified as part of a dos attack.
exclude_invalid_dns_domain_as_error:
description:
- Exclude dns queries to domains outside the domains configured in the dns application profile from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_invalid_dns_query_as_error:
description:
- Exclude invalid dns queries from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_no_dns_record_as_error:
description:
- Exclude queries to domains that did not have configured services/records from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_no_valid_gs_member_as_error:
description:
- Exclude queries to gslb services that have no available members from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_persistence_change_as_error:
description:
            - Exclude 'persistence server changed while load balancing' from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_server_dns_error_as_error:
description:
- Exclude server dns error response from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_server_tcp_reset_as_error:
description:
- Exclude server tcp reset from errors.
- It is common for applications like ms exchange.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_syn_retransmit_as_error:
description:
- Exclude 'server unanswered syns' from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_tcp_reset_as_error:
description:
- Exclude tcp resets by client from the list of potential errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_unsupported_dns_query_as_error:
description:
- Exclude unsupported dns queries from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
hs_event_throttle_window:
description:
- Time window (in secs) within which only unique health change events should occur.
- Default value when not specified in API or module is interpreted by Avi Controller as 1209600.
hs_max_anomaly_penalty:
description:
- Maximum penalty that may be deducted from health score for anomalies.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
hs_max_resources_penalty:
description:
- Maximum penalty that may be deducted from health score for high resource utilization.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 25.
hs_max_security_penalty:
description:
- Maximum penalty that may be deducted from health score based on security assessment.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
hs_min_dos_rate:
description:
- Dos connection rate below which the dos security assessment will not kick in.
- Default value when not specified in API or module is interpreted by Avi Controller as 1000.
hs_performance_boost:
description:
- Adds free performance score credits to health score.
- It can be used for compensating health score for known slow applications.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
hs_pscore_traffic_threshold_l4_client:
description:
- Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
hs_pscore_traffic_threshold_l4_server:
description:
- Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
hs_security_certscore_expired:
description:
- Score assigned when the certificate has expired.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_certscore_gt30d:
description:
- Score assigned when the certificate expires in more than 30 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_certscore_le07d:
description:
- Score assigned when the certificate expires in less than or equal to 7 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.0.
hs_security_certscore_le30d:
description:
- Score assigned when the certificate expires in less than or equal to 30 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
hs_security_chain_invalidity_penalty:
description:
- Penalty for allowing certificates with invalid chain.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_cipherscore_eq000b:
description:
- Score assigned when the minimum cipher strength is 0 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_cipherscore_ge128b:
description:
- Score assigned when the minimum cipher strength is greater than equal to 128 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_cipherscore_lt128b:
description:
- Score assigned when the minimum cipher strength is less than 128 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
hs_security_encalgo_score_none:
description:
- Score assigned when no algorithm is used for encryption.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_encalgo_score_rc4:
description:
- Score assigned when rc4 algorithm is used for encryption.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.5.
hs_security_hsts_penalty:
description:
- Penalty for not enabling hsts.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_nonpfs_penalty:
description:
- Penalty for allowing non-pfs handshakes.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_selfsignedcert_penalty:
description:
- Deprecated.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_ssl30_score:
description:
- Score assigned when supporting ssl3.0 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
hs_security_tls10_score:
description:
- Score assigned when supporting tls1.0 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_tls11_score:
description:
- Score assigned when supporting tls1.1 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_tls12_score:
description:
- Score assigned when supporting tls1.2 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_weak_signature_algo_penalty:
description:
- Penalty for allowing weak signature algorithm(s).
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
name:
description:
- The name of the analytics profile.
required: true
ranges:
description:
- List of http status code ranges to be excluded from being classified as an error.
resp_code_block:
description:
- Block of http response codes to be excluded from being classified as an error.
- Enum options - AP_HTTP_RSP_4XX, AP_HTTP_RSP_5XX.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the analytics profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a custom Analytics profile object
avi_analyticsprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
apdex_response_threshold: 500
apdex_response_tolerated_factor: 4.0
apdex_rtt_threshold: 250
apdex_rtt_tolerated_factor: 4.0
apdex_rum_threshold: 5000
apdex_rum_tolerated_factor: 4.0
apdex_server_response_threshold: 400
apdex_server_response_tolerated_factor: 4.0
apdex_server_rtt_threshold: 125
apdex_server_rtt_tolerated_factor: 4.0
conn_lossy_ooo_threshold: 50
conn_lossy_timeo_rexmt_threshold: 20
conn_lossy_total_rexmt_threshold: 50
conn_lossy_zero_win_size_event_threshold: 2
conn_server_lossy_ooo_threshold: 50
conn_server_lossy_timeo_rexmt_threshold: 20
conn_server_lossy_total_rexmt_threshold: 50
conn_server_lossy_zero_win_size_event_threshold: 2
disable_se_analytics: false
disable_server_analytics: false
exclude_client_close_before_request_as_error: false
exclude_persistence_change_as_error: false
exclude_server_tcp_reset_as_error: false
exclude_syn_retransmit_as_error: false
exclude_tcp_reset_as_error: false
hs_event_throttle_window: 1209600
hs_max_anomaly_penalty: 10
hs_max_resources_penalty: 25
hs_max_security_penalty: 100
hs_min_dos_rate: 1000
hs_performance_boost: 20
hs_pscore_traffic_threshold_l4_client: 10.0
hs_pscore_traffic_threshold_l4_server: 10.0
hs_security_certscore_expired: 0.0
hs_security_certscore_gt30d: 5.0
hs_security_certscore_le07d: 2.0
hs_security_certscore_le30d: 4.0
hs_security_chain_invalidity_penalty: 1.0
hs_security_cipherscore_eq000b: 0.0
hs_security_cipherscore_ge128b: 5.0
hs_security_cipherscore_lt128b: 3.5
hs_security_encalgo_score_none: 0.0
hs_security_encalgo_score_rc4: 2.5
hs_security_hsts_penalty: 0.0
hs_security_nonpfs_penalty: 1.0
hs_security_selfsignedcert_penalty: 1.0
hs_security_ssl30_score: 3.5
hs_security_tls10_score: 5.0
hs_security_tls11_score: 5.0
hs_security_tls12_score: 5.0
hs_security_weak_signature_algo_penalty: 1.0
name: jason-analytics-profile
tenant_ref: Demo
"""
RETURN = '''
obj:
description: AnalyticsProfile (api/analyticsprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.network.avi.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    # The Avi SDK helpers are optional; callers are expected to check
    # HAS_AVI and fail gracefully when the avisdk package is missing.
    HAS_AVI = False
def main():
    """Ansible entry point: create/update/delete an Avi AnalyticsProfile.

    Builds an argument spec mirroring the AnalyticsProfile API object's
    fields, merges in the common Avi connection arguments, and delegates
    the actual API interaction to avi_ansible_api().
    """
    # One dict entry per AnalyticsProfile API field; types mirror the API
    # schema (thresholds are ints, scores/factors are floats).
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        apdex_response_threshold=dict(type='int',),
        apdex_response_tolerated_factor=dict(type='float',),
        apdex_rtt_threshold=dict(type='int',),
        apdex_rtt_tolerated_factor=dict(type='float',),
        apdex_rum_threshold=dict(type='int',),
        apdex_rum_tolerated_factor=dict(type='float',),
        apdex_server_response_threshold=dict(type='int',),
        apdex_server_response_tolerated_factor=dict(type='float',),
        apdex_server_rtt_threshold=dict(type='int',),
        apdex_server_rtt_tolerated_factor=dict(type='float',),
        client_log_config=dict(type='dict',),
        client_log_streaming_config=dict(type='dict',),
        conn_lossy_ooo_threshold=dict(type='int',),
        conn_lossy_timeo_rexmt_threshold=dict(type='int',),
        conn_lossy_total_rexmt_threshold=dict(type='int',),
        conn_lossy_zero_win_size_event_threshold=dict(type='int',),
        conn_server_lossy_ooo_threshold=dict(type='int',),
        conn_server_lossy_timeo_rexmt_threshold=dict(type='int',),
        conn_server_lossy_total_rexmt_threshold=dict(type='int',),
        conn_server_lossy_zero_win_size_event_threshold=dict(type='int',),
        description=dict(type='str',),
        disable_se_analytics=dict(type='bool',),
        disable_server_analytics=dict(type='bool',),
        exclude_client_close_before_request_as_error=dict(type='bool',),
        exclude_dns_policy_drop_as_significant=dict(type='bool',),
        exclude_gs_down_as_error=dict(type='bool',),
        exclude_http_error_codes=dict(type='list',),
        exclude_invalid_dns_domain_as_error=dict(type='bool',),
        exclude_invalid_dns_query_as_error=dict(type='bool',),
        exclude_no_dns_record_as_error=dict(type='bool',),
        exclude_no_valid_gs_member_as_error=dict(type='bool',),
        exclude_persistence_change_as_error=dict(type='bool',),
        exclude_server_dns_error_as_error=dict(type='bool',),
        exclude_server_tcp_reset_as_error=dict(type='bool',),
        exclude_syn_retransmit_as_error=dict(type='bool',),
        exclude_tcp_reset_as_error=dict(type='bool',),
        exclude_unsupported_dns_query_as_error=dict(type='bool',),
        hs_event_throttle_window=dict(type='int',),
        hs_max_anomaly_penalty=dict(type='int',),
        hs_max_resources_penalty=dict(type='int',),
        hs_max_security_penalty=dict(type='int',),
        hs_min_dos_rate=dict(type='int',),
        hs_performance_boost=dict(type='int',),
        hs_pscore_traffic_threshold_l4_client=dict(type='float',),
        hs_pscore_traffic_threshold_l4_server=dict(type='float',),
        hs_security_certscore_expired=dict(type='float',),
        hs_security_certscore_gt30d=dict(type='float',),
        hs_security_certscore_le07d=dict(type='float',),
        hs_security_certscore_le30d=dict(type='float',),
        hs_security_chain_invalidity_penalty=dict(type='float',),
        hs_security_cipherscore_eq000b=dict(type='float',),
        hs_security_cipherscore_ge128b=dict(type='float',),
        hs_security_cipherscore_lt128b=dict(type='float',),
        hs_security_encalgo_score_none=dict(type='float',),
        hs_security_encalgo_score_rc4=dict(type='float',),
        hs_security_hsts_penalty=dict(type='float',),
        hs_security_nonpfs_penalty=dict(type='float',),
        hs_security_selfsignedcert_penalty=dict(type='float',),
        hs_security_ssl30_score=dict(type='float',),
        hs_security_tls10_score=dict(type='float',),
        hs_security_tls11_score=dict(type='float',),
        hs_security_tls12_score=dict(type='float',),
        hs_security_weak_signature_algo_penalty=dict(type='float',),
        name=dict(type='str', required=True),
        ranges=dict(type='list',),
        resp_code_block=dict(type='list',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # The SDK check is deferred until runtime so that `ansible-doc` and
    # argument parsing still work when avisdk is not installed.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'analyticsprofile',
                           set([]))
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
msiedlarek/qtwebkit | Tools/Scripts/webkitpy/tool/commands/bugsearch.py | 124 | 2323 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.multicommandtool import Command
class BugSearch(Command):
    """Command that runs a Bugzilla quicksearch and prints matching bugs.

    Registered under the name "bug-search"; takes a single QUERY argument.
    """
    name = "bug-search"
    help_text = "List bugs matching a query"
    argument_names = "QUERY"
    long_help = \
"""Runs the bugzilla quicksearch QUERY on bugs.webkit.org, and lists all bugs
returned. QUERY can be as simple as a bug number or a comma delimited list of
bug numbers.
See https://bugzilla.mozilla.org/page.cgi?id=quicksearch.html for full
documentation on the query format."""

    def execute(self, options, args, tool):
        """Fetch bugs matching args[0] and print one '<id> <title>' per bug."""
        search_string = args[0]
        bugs = tool.bugs.queries.fetch_bugs_matching_quicksearch(search_string)
        for bug in bugs:
            # Parenthesized single-argument print works under both
            # Python 2 (print statement) and Python 3 (print function);
            # the old bare `print "..."` form is a SyntaxError on Python 3.
            print("%5s %s" % (bug.id(), bug.title()))
        if not bugs:
            print("No bugs found matching '%s'" % search_string)
| lgpl-3.0 |
kutenai/django | tests/model_regress/tests.py | 35 | 8981 | from __future__ import unicode_literals
import datetime
from operator import attrgetter
from django.core.exceptions import ValidationError
from django.db import router
from django.db.models.sql import InsertQuery
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.timezone import get_fixed_timezone
from .models import (
Article, BrokenUnicodeMethod, Department, Event, Model1, Model2, Model3,
NonAutoPK, Party, Worker,
)
class ModelTests(TestCase):
    """Assorted ORM regression tests: related-field lookups, insert compiler,
    empty choices, long text fields, date lookups, non-default PKs, broken
    __unicode__, timezone-aware saves, and chained to_field foreign keys."""
    # The bug is that the following queries would raise:
    # "TypeError: Related Field has invalid lookup: gte"
    def test_related_gte_lookup(self):
        """
        Regression test for #10153: foreign key __gte lookups.
        """
        Worker.objects.filter(department__gte=0)
    def test_related_lte_lookup(self):
        """
        Regression test for #10153: foreign key __lte lookups.
        """
        Worker.objects.filter(department__lte=0)
    def test_sql_insert_compiler_return_id_attribute(self):
        """
        Regression test for #14019: SQLInsertCompiler.as_sql() failure
        """
        db = router.db_for_write(Party)
        query = InsertQuery(Party)
        query.insert_values([Party._meta.fields[0]], [], raw=False)
        # this line will raise an AttributeError without the accompanying fix
        query.get_compiler(using=db).as_sql()
    def test_empty_choice(self):
        # NOTE: Part of the regression test here is merely parsing the model
        # declaration. The verbose_name, in particular, did not always work.
        a = Article.objects.create(
            headline="Look at me!", pub_date=datetime.datetime.now()
        )
        # An empty choice field should return None for the display name.
        self.assertIs(a.get_status_display(), None)
        # Empty strings should be returned as Unicode
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(a.misc_data, '')
        self.assertIs(type(a.misc_data), six.text_type)
    def test_long_textfield(self):
        # TextFields can hold more than 4000 characters (this was broken in
        # Oracle).
        a = Article.objects.create(
            headline="Really, really big",
            pub_date=datetime.datetime.now(),
            article_text="ABCDE" * 1000
        )
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(len(a.article_text), 5000)
    def test_long_unicode_textfield(self):
        # TextFields can hold more than 4000 bytes also when they are
        # less than 4000 characters
        a = Article.objects.create(
            headline="Really, really big",
            pub_date=datetime.datetime.now(),
            article_text='\u05d0\u05d1\u05d2' * 1000
        )
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(len(a.article_text), 3000)
    def test_date_lookup(self):
        # Regression test for #659
        Party.objects.create(when=datetime.datetime(1999, 12, 31))
        Party.objects.create(when=datetime.datetime(1998, 12, 31))
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        Party.objects.create(when=datetime.datetime(1, 3, 3))
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=2), []
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=1), [
                datetime.date(1999, 1, 1)
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=12), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1998), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #8510: date lookups also accept string values.
        self.assertQuerysetEqual(
            Party.objects.filter(when__day="31"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month="12"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year="1998"), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #18969: year 1 (minimum date) must work too.
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1), [
                datetime.date(1, 3, 3),
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year='1'), [
                datetime.date(1, 3, 3),
            ],
            attrgetter("when")
        )
    def test_date_filter_null(self):
        # Date filtering was failing with NULL date values in SQLite
        # (regression test for #3501, among other things).
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        Party.objects.create()
        p = Party.objects.filter(when__month=1)[0]
        self.assertEqual(p.when, datetime.date(1999, 1, 1))
        self.assertQuerysetEqual(
            Party.objects.filter(pk=p.pk).dates("when", "month"), [
                1
            ],
            attrgetter("month")
        )
    def test_get_next_prev_by_field(self):
        # Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
        # when we have usecs values stored on the database
        #
        # It crashed after the Field.get_db_prep_* refactor, because on most
        # backends DateTimeFields supports usecs, but DateTimeField.to_python
        # didn't recognize them. (Note that
        # Model._get_next_or_previous_by_FIELD coerces values to strings)
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
        e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
        self.assertEqual(
            e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
        )
        self.assertEqual(
            e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
        )
    def test_primary_key_foreign_key_types(self):
        # Check Department and Worker (non-default PK type)
        d = Department.objects.create(id=10, name="IT")
        w = Worker.objects.create(department=d, name="Full-time")
        self.assertEqual(six.text_type(w), "Full-time")
    def test_broken_unicode(self):
        # Models with broken unicode methods should still have a printable repr
        b = BrokenUnicodeMethod.objects.create(name="Jerry")
        self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")
    @skipUnlessDBFeature("supports_timezones")
    def test_timezones(self):
        # Saving an updating with timezone-aware datetime Python objects.
        # Regression test for #10443.
        # The idea is that all these creations and saving should work without
        # crashing. It's not rocket science.
        dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
        dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
        obj = Article.objects.create(
            headline="A headline", pub_date=dt1, article_text="foo"
        )
        obj.pub_date = dt2
        obj.save()
        self.assertEqual(
            Article.objects.filter(headline="A headline").update(pub_date=dt1),
            1
        )
    def test_chained_fks(self):
        """
        Regression for #18432: Chained foreign keys with to_field produce incorrect query
        """
        m1 = Model1.objects.create(pkey=1000)
        m2 = Model2.objects.create(model1=m1)
        m3 = Model3.objects.create(model2=m2)
        # this is the actual test for #18432
        m3 = Model3.objects.get(model2=1000)
        m3.model2
class ModelValidationTest(TestCase):
    """Validation behavior for models with a manually assigned primary key."""

    def test_pk_validation(self):
        """validate_unique() must reject a duplicate non-auto primary key."""
        NonAutoPK.objects.create(name="one")
        duplicate = NonAutoPK(name="one")
        with self.assertRaises(ValidationError):
            duplicate.validate_unique()
class EvaluateMethodTest(TestCase):
    """
    Regression test for #13640: cannot filter by objects with 'evaluate' attr
    """
    def test_model_with_evaluate_method(self):
        """
        Ensures that you can filter by objects that have an 'evaluate' attr
        """
        department = Department.objects.create(pk=1, name='abc')
        # Attach an 'evaluate' attribute; the ORM must not mistake the
        # instance for a query expression when it appears in a filter.
        department.evaluate = 'abc'
        Worker.objects.filter(department=department)
| bsd-3-clause |
ASCrookes/django | django/contrib/auth/tokens.py | 433 | 2803 | from datetime import date
from django.conf import settings
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36
class PasswordResetTokenGenerator(object):
    """
    Strategy object used to generate and check tokens for the password
    reset mechanism.

    A token is "<days-base36>-<hmac>": the number of days since 2001-1-1
    in base 36, joined to a truncated salted HMAC over user state that
    changes when the password is set or the user logs in, so a token is
    invalidated by use and expires after PASSWORD_RESET_TIMEOUT_DAYS.
    """
    key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"

    def make_token(self, user):
        """
        Returns a token that can be used once to do a password reset
        for the given user.
        """
        return self._make_token_with_timestamp(user, self._num_days(self._today()))

    def check_token(self, user, token):
        """
        Check that a password reset token is correct for a given user.

        Returns False (never raises) for a missing user/token or any
        malformed token, so callers can treat the result as a plain bool.
        """
        if not (user and token):
            # Guard against None/empty values: token.split() below would
            # otherwise raise AttributeError on a None token.
            return False
        # Parse the token
        try:
            ts_b36, hash = token.split("-")
        except ValueError:
            return False

        try:
            ts = base36_to_int(ts_b36)
        except ValueError:
            return False

        # Check that the timestamp/uid has not been tampered with.
        # constant_time_compare avoids leaking information through timing.
        if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
            return False

        # Check the timestamp is within limit
        if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
            return False

        return True

    def _make_token_with_timestamp(self, user, timestamp):
        # timestamp is number of days since 2001-1-1.  Converted to
        # base 36, this gives us a 3 digit string until about 2121
        ts_b36 = int_to_base36(timestamp)

        # By hashing on the internal state of the user and using state
        # that is sure to change (the password salt will change as soon as
        # the password is set, at least for current Django auth, and
        # last_login will also change), we produce a hash that will be
        # invalid as soon as it is used.
        # We limit the hash to 20 chars to keep URL short
        hash = salted_hmac(
            self.key_salt,
            self._make_hash_value(user, timestamp),
        ).hexdigest()[::2]
        return "%s-%s" % (ts_b36, hash)

    def _make_hash_value(self, user, timestamp):
        # Ensure results are consistent across DB backends: strip usecs and
        # tzinfo from last_login before it is stringified into the hash.
        login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
        return (
            six.text_type(user.pk) + user.password +
            six.text_type(login_timestamp) + six.text_type(timestamp)
        )

    def _num_days(self, dt):
        # Days elapsed since the 2001-1-1 epoch used for token timestamps.
        return (dt - date(2001, 1, 1)).days

    def _today(self):
        # Used for mocking in tests
        return date.today()

default_token_generator = PasswordResetTokenGenerator()
| bsd-3-clause |
vipulroxx/sympy | sympy/galgebra/tests/test_ga.py | 7 | 24774 | # sympy/galgebra/tests/test_ga.py
"""
The reference D&L is "Geometric Algebra for Physicists" by Doran and Lasenby
"""
from sympy.core import expand, Rational, S, Symbol, symbols
from sympy.core.compatibility import range
from sympy.functions import sin, cos
from sympy.galgebra.ga import MV, Nga, Com
from sympy.galgebra.printing import GA_Printer
from sympy.matrices import Matrix
from sympy.simplify import collect, simplify
from sympy.utilities.pytest import XFAIL
def F(x, n, nbar):
    """
    Conformal Mapping Function from 3D Euclidean space to 5D conformal space
    where the images of all maps are null vectors.
    """
    # Multivector products do not commute, so the operand order of the
    # original expression is preserved; only the scalar factor is hoisted.
    half = Rational(1, 2)
    return half*((x*x)*n + 2*x - nbar)
def make_vector(a, m=3):
    """Return the conformal image F(a); a string is first turned into a
    symbolic m-component vector padded with zeros for the n/nbar slots."""
    global n, nbar
    if isinstance(a, str):
        # Build 'a1 a2 ... am ' exactly as the original loop did
        # (including the trailing space, which symbols() ignores).
        name_str = ''.join(a + str(i + 1) + ' ' for i in range(m))
        components = list(symbols(name_str))
        components.append(S.Zero)
        components.append(S.Zero)
        a = MV(components, 'vector')
    return F(a, n, nbar)
def test_rmul():
    """
    Test for commutative scalar multiplication. Leftover from when sympy and
    numpy were not working together and __mul__ and __rmul__ would not give the
    same answer.
    """
    x, y, z = MV.setup('x y z')
    a, b, c = symbols('a b c')
    # Scalars of every flavor (int, Rational, Symbol) must commute with MVs.
    assert 5*x == x*5
    assert Rational(1, 2)*x == x*Rational(1, 2)
    assert a*x == x*a
def test_contraction():
    """
    Test for inner product and left and right contraction
    """
    e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '1 0 0, 0 1 0, 0 0 1')
    # '|' is the inner product, '<' left contraction, '>' right contraction.
    assert ((e_1 ^ e_3) | e_1) == -e_3
    assert ((e_1 ^ e_3) > e_1) == -e_3
    assert (e_1 | (e_1 ^ e_3)) == e_3
    assert (e_1 < (e_1 ^ e_3)) == e_3
    # Contracting a higher-grade blade from the lower-grade side gives zero.
    assert ((e_1 ^ e_3) < e_1) == 0
    assert (e_1 > (e_1 ^ e_3)) == 0
def test_substitution():
    """Coefficient substitution via MV.subs works component-wise."""
    e_x, e_y, e_z = MV.setup('e_x e_y e_z', '1 0 0, 0 1 0, 0 0 1')
    x, y, z = symbols('x y z')
    X = x*e_x + y*e_y + z*e_z
    Y = X.subs([(x, 2), (y, 3), (z, 4)])
    assert Y == 2*e_x + 3*e_y + 4*e_z
def test_vector_extraction():
    """
    Show that conformal bivector encodes two points. See D&L Section 10.4.1
    """
    metric = ' 0 -1 #,' + \
             '-1 0 #,' + \
             ' # # #,'
    P1, P2, a = MV.setup('P1 P2 a', metric)
    """
    P1 and P2 are null vectors and hence encode points in conformal space.
    Show that P1 and P2 can be extracted from the bivector B = P1^P2. a is a
    third vector in the conformal space with a.B not 0.
    """
    B = P1 ^ P2
    # NOTE(review): Bsq is computed but never used below.
    Bsq = B*B
    ap = a - (a ^ B)*B
    Ap = ap + ap*B
    Am = ap - ap*B
    P1dota = Symbol('(P1.a)')
    P2dota = Symbol('(P2.a)')
    Ap_test = (-2*P2dota)*P1
    Am_test = (-2*P1dota)*P2
    assert Ap == Ap_test
    assert Am == Am_test
    # Extracted points must themselves be null vectors.
    Ap2 = Ap*Ap
    Am2 = Am*Am
    assert Ap2 == S.Zero
    assert Am2 == S.Zero
def test_metrics():
    """
    Test specific metrics (diagpq, arbitrary_metric, arbitrary_metric_conformal)
    """
    from sympy.galgebra.ga import diagpq, arbitrary_metric
    # For each metric, the geometric product of two vectors must decompose
    # into inner plus outer parts: v1*v2 == (v1|v2) + (v1^v2).
    metric = diagpq(3)
    p1, p2, p3 = MV.setup('p1 p2 p3', metric, debug=0)
    x1, y1, z1 = symbols('x1 y1 z1')
    x2, y2, z2 = symbols('x2 y2 z2')
    v1 = x1*p1 + y1*p2 + z1*p3
    v2 = x2*p1 + y2*p2 + z2*p3
    prod1 = v1*v2
    prod2 = (v1|v2) + (v1^v2)
    diff = prod1 - prod2
    assert diff == MV(S.Zero)
    metric = arbitrary_metric(3)
    p1, p2, p3 = MV.setup('p1 p2 p3', metric, debug=0)
    v1 = x1*p1 + y1*p2 + z1*p3
    v2 = x2*p1 + y2*p2 + z2*p3
    prod1 = v1*v2
    prod2 = (v1|v2) + (v1^v2)
    diff = prod1 - prod2
    assert diff == MV(S.Zero)
@XFAIL
def test_metrics_xfail():
    """
    arbitrary_metric_conformal is expected to fail, but the check itself is
    the same decomposition as in test_metrics: v1*v2 == (v1|v2) + (v1^v2).

    The original body referenced x1..z2 without defining them (they were
    locals of test_metrics), so the XFAIL tripped on a NameError instead of
    the intended algebraic failure; the symbols are now defined locally.
    """
    from sympy.galgebra.ga import arbitrary_metric_conformal
    x1, y1, z1 = symbols('x1 y1 z1')
    x2, y2, z2 = symbols('x2 y2 z2')
    metric = arbitrary_metric_conformal(3)
    p1, p2, p3 = MV.setup('p1 p2 p3', metric, debug=0)
    v1 = x1*p1 + y1*p2 + z1*p3
    v2 = x2*p1 + y2*p2 + z2*p3
    prod1 = v1*v2
    prod2 = (v1|v2) + (v1^v2)
    diff = prod1 - prod2
    assert diff == MV(S.Zero)
def test_geometry():
    """
    Test conformal geometric description of circles, lines, spheres, and planes.
    """
    metric = '1 0 0 0 0,' + \
             '0 1 0 0 0,' + \
             '0 0 1 0 0,' + \
             '0 0 0 0 2,' + \
             '0 0 0 2 0'
    e0, e1, e2, n, nbar = MV.setup('e0 e1 e2 n nbar', metric, debug=0)
    # NOTE(review): e is assigned but never used in this test.
    e = n + nbar
    #conformal representation of points
    A = F(e0, n, nbar)     # point a = (1,0,0)  A = F(a)
    B = F(e1, n, nbar)     # point b = (0,1,0)  B = F(b)
    C = F(-1*e0, n, nbar)  # point c = (-1,0,0) C = F(c)
    D = F(e2, n, nbar)     # point d = (0,0,1)  D = F(d)
    x0, x1, x2 = symbols('x0 x1 x2')
    X = F(MV([x0, x1, x2], 'vector'), n, nbar)
    # Geometric objects are outer products of conformal points (and n).
    Circle = A ^ B ^ C ^ X
    Line = A ^ B ^ n ^ X
    Sphere = A ^ B ^ C ^ D ^ X
    Plane = A ^ B ^ n ^ D ^ X
    #Circle through a, b, and c
    Circle_test = -x2*(e0 ^ e1 ^ e2 ^ n) + x2*(
        e0 ^ e1 ^ e2 ^ nbar) + Rational(1, 2)*(-1 + x0**2 + x1**2 + x2**2)*(e0 ^ e1 ^ n ^ nbar)
    diff = Circle - Circle_test
    assert diff == S.Zero
    #Line through a and b
    Line_test = -x2*(e0 ^ e1 ^ e2 ^ n) + \
        Rational(1, 2)*(-1 + x0 + x1)*(e0 ^ e1 ^ n ^ nbar) + \
        (Rational(1, 2)*x2)*(e0 ^ e2 ^ n ^ nbar) + \
        (-Rational(1, 2)*x2)*(e1 ^ e2 ^ n ^ nbar)
    diff = Line - Line_test
    assert diff == S.Zero
    #Sphere through a, b, c, and d
    Sphere_test = Rational(1, 2)*(1 - x0**2 - x1**2 - x2**2)*(e0 ^ e1 ^ e2 ^ n ^ nbar)
    diff = Sphere - Sphere_test
    assert diff == S.Zero
    #Plane through a, b, and d
    Plane_test = Rational(1, 2)*(1 - x0 - x1 - x2)*(e0 ^ e1 ^ e2 ^ n ^ nbar)
    diff = Plane - Plane_test
    assert diff == S.Zero
def test_extract_plane_and_line():
    """
    Show that conformal trivector encodes planes and lines. See D&L section
    10.4.2
    """
    metric = '# # # 0 0,' + \
             '# # # 0 0,' + \
             '# # # 0 0,' + \
             '0 0 0 0 2,' + \
             '0 0 0 2 0'
    p1, p2, p3, n, nbar = MV.setup('p1 p2 p3 n nbar', metric, debug=0)
    P1 = F(p1, n, nbar)
    P2 = F(p2, n, nbar)
    P3 = F(p3, n, nbar)
    #Line through p1 and p2
    L = P1 ^ P2 ^ n
    # Contracting with n then nbar recovers the direction of the line.
    delta = (L | n) | nbar
    delta_test = 2*p1 - 2*p2
    diff = delta - delta_test
    assert diff == S.Zero
    #Plane through p1, p2, and p3
    C = P1 ^ P2 ^ P3
    # The analogous double contraction recovers the plane's bivector part.
    delta = ((C ^ n) | n) | nbar
    delta_test = 2*(p1 ^ p2) - 2*(p1 ^ p3) + 2*(p2 ^ p3)
    diff = delta - delta_test
    assert diff == S.Zero
@XFAIL
def test_reciprocal_frame():
    """
    Test of formula for general reciprocal frame of three vectors.
    Let three independent vectors be e1, e2, and e3. The reciprocal
    vectors E1, E2, and E3 obey the relations:
    e_i.E_j = delta_ij*(e1^e2^e3)**2
    """
    metric = '1 # #,' + \
             '# 1 #,' + \
             '# # 1,'
    e1, e2, e3 = MV.setup('e1 e2 e3', metric)
    E = e1 ^ e2 ^ e3
    Esq = (E*E)()
    # NOTE(review): Esq_inv is assigned but never used below.
    Esq_inv = 1/Esq
    E1 = (e2 ^ e3)*E
    E2 = (-1)*(e1 ^ e3)*E
    E3 = (e1 ^ e2)*E
    w = (E1 | e2)
    w.collect(MV.g)
    w = w().expand()
    # NOTE(review): the (E1|e2) result above is discarded without a check;
    # an `assert w == 0` appears to be missing here -- confirm upstream.
    w = (E1 | e3)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E2 | e1)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E2 | e3)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E3 | e1)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E3 | e2)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    # Diagonal cases: e_i.E_i must equal Esq.
    w = (E1 | e1)
    w = w().expand()
    Esq = Esq.expand()
    assert w/Esq == 1
    w = (E2 | e2)
    w = w().expand()
    assert w/Esq == 1
    w = (E3 | e3)
    w = w().expand()
    assert w/Esq == 1
@XFAIL
def test_derivative():
    """Vector-derivative identities (grad of X.a, X*X, X*X*X); expected to fail."""
    coords = x, y, z = symbols('x y z')
    e_x, e_y, e_z, _ = MV.setup('e', '1 0 0, 0 1 0, 0 0 1', coords=coords)
    X = x*e_x + y*e_y + z*e_z
    a = MV('a', 'vector')
    assert ((X | a).grad()) == a
    assert ((X*X).grad()) == 2*X
    assert (X*X*X).grad() == 5*X*X
    assert X.grad_int() == 3
@XFAIL
def test_str():
    """String printing of general, spinor, and summed multivectors; expected to fail."""
    e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '1 0 0, 0 1 0, 0 0 1')
    X = MV('x')
    assert str(X) == 'x + x__1*e_1 + x__2*e_2 + x__3*e_3 + x__12*e_1^e_2 + x__13*e_1^e_3 + x__23*e_2^e_3 + x__123**e_1^e_2^e_3'
    Y = MV('y', 'spinor')
    assert str(Y) == 'y + y__12*e_1^e_2 + y__13*e_1^e_3 + y__23*e_2^e_3'
    Z = X + Y
    assert str(Z) == 'x + y + x__1*e_1 + x__2*e_2 + x__3*e_3 + (x__12 + y__12)*e_1^e_2 + (x__13 + y__13)*e_1^e_3 + (x__23 + y__23)*e_2^e_3 + x__123*e_1^e_2^e_3'
    assert str(e_1 | e_1) == '1'
@XFAIL
def test_metric():
    """MV.setup with a list-style metric should populate MV.metric; expected to fail."""
    MV.setup('e_1 e_2 e_3', '[1,1,1]')
    assert MV.metric == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
@XFAIL
def test_constructor():
    """
    Test various multivector constructors
    """
    e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '[1,1,1]')
    # Each mode ('scalar', 'vector', 'pseudo', 'spinor', full, graded) has a
    # distinct printed component layout.
    assert str(MV('a', 'scalar')) == 'a'
    assert str(MV('a', 'vector')) == 'a__1*e_1 + a__2*e_2 + a__3*e_3'
    assert str(MV('a', 'pseudo')) == 'a__123*e_1^e_2^e_3'
    assert str(MV('a', 'spinor')) == 'a + a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
    assert str(MV('a')) == 'a + a__1*e_1 + a__2*e_2 + a__3*e_3 + a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3 + a__123*e_1^e_2^e_3'
    assert str(MV([2, 'a'], 'grade')) == 'a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
    assert str(MV('a', 'grade2')) == 'a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
def test_basic_multivector_operations():
    """Printed form of products/contractions under general and Euclidean metrics."""
    with GA_Printer():
        (ex, ey, ez) = MV.setup('e*x|y|z')
        A = MV('A', 'mv')
        # The same value is asserted three times; presumably this checks that
        # printing does not mutate internal state -- TODO confirm intent.
        assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
        assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
        assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
        X = MV('X', 'vector')
        Y = MV('Y', 'vector')
        assert str(X) == 'X__x*e_x + X__y*e_y + X__z*e_z'
        assert str(Y) == 'Y__x*e_x + Y__y*e_y + Y__z*e_z'
        assert str((X*Y)) == '(e_x.e_x)*X__x*Y__x + (e_x.e_y)*X__x*Y__y + (e_x.e_y)*X__y*Y__x + (e_x.e_z)*X__x*Y__z + (e_x.e_z)*X__z*Y__x + (e_y.e_y)*X__y*Y__y + (e_y.e_z)*X__y*Y__z + (e_y.e_z)*X__z*Y__y + (e_z.e_z)*X__z*Y__z + (X__x*Y__y - X__y*Y__x)*e_x^e_y + (X__x*Y__z - X__z*Y__x)*e_x^e_z + (X__y*Y__z - X__z*Y__y)*e_y^e_z'
        assert str((X ^ Y)) == '(X__x*Y__y - X__y*Y__x)*e_x^e_y + (X__x*Y__z - X__z*Y__x)*e_x^e_z + (X__y*Y__z - X__z*Y__y)*e_y^e_z'
        assert str((X | Y)) == '(e_x.e_x)*X__x*Y__x + (e_x.e_y)*X__x*Y__y + (e_x.e_y)*X__y*Y__x + (e_x.e_z)*X__x*Y__z + (e_x.e_z)*X__z*Y__x + (e_y.e_y)*X__y*Y__y + (e_y.e_z)*X__y*Y__z + (e_y.e_z)*X__z*Y__y + (e_z.e_z)*X__z*Y__z'
        (ex, ey) = MV.setup('e*x|y')
        X = MV('X', 'vector')
        A = MV('A', 'spinor')
        assert str(X) == 'X__x*e_x + X__y*e_y'
        assert str(A) == 'A + A__xy*e_x^e_y'
        assert str((X | A)) == '(-A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'
        assert str((X < A)) == '(-A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'
        assert str((A > X)) == '(A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (-A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'
        # Same operations under an explicit Euclidean metric.
        (ex, ey) = MV.setup('e*x|y', metric='[1,1]')
        X = MV('X', 'vector')
        A = MV('A', 'spinor')
        assert str(X) == 'X__x*e_x + X__y*e_y'
        assert str(A) == 'A + A__xy*e_x^e_y'
        assert str((X*A)) == '(A*X__x - A__xy*X__y)*e_x + (A*X__y + A__xy*X__x)*e_y'
        assert str((X | A)) == '-A__xy*X__y*e_x + A__xy*X__x*e_y'
        assert str((X < A)) == '-A__xy*X__y*e_x + A__xy*X__x*e_y'
        assert str((X > A)) == 'A*X__x*e_x + A*X__y*e_y'
        assert str((A*X)) == '(A*X__x + A__xy*X__y)*e_x + (A*X__y - A__xy*X__x)*e_y'
        assert str((A | X)) == 'A__xy*X__y*e_x - A__xy*X__x*e_y'
        assert str((A < X)) == 'A*X__x*e_x + A*X__y*e_y'
        assert str((A > X)) == 'A__xy*X__y*e_x - A__xy*X__x*e_y'
    return
def test_check_generalized_BAC_CAB_formulas():
    """Generalized BAC-CAB expansion identities for contractions and commutators."""
    with GA_Printer():
        (a, b, c, d, e) = MV.setup('a b c d e')
        assert str(a | (b*c)) == '-(a.c)*b + (a.b)*c'
        assert str(a | (b ^ c)) == '-(a.c)*b + (a.b)*c'
        assert str(a | (b ^ c ^ d)) == '(a.d)*b^c - (a.c)*b^d + (a.b)*c^d'
        # Jacobi-like cyclic identity: the cyclic sum vanishes.
        assert str((a | (b ^ c)) + (c | (a ^ b)) + (b | (c ^ a))) == '0'
        assert str(a*(b ^ c) - b*(a ^ c) + c*(a ^ b)) == '3*a^b^c'
        assert str(a*(b ^ c ^ d) - b*(a ^ c ^ d) + c*(a ^ b ^ d) - d*(a ^ b ^ c)) == '4*a^b^c^d'
        assert str((a ^ b) | (c ^ d)) == '-(a.c)*(b.d) + (a.d)*(b.c)'
        assert str(((a ^ b) | c) | d) == '-(a.c)*(b.d) + (a.d)*(b.c)'
        assert str(Com(a ^ b, c ^ d)) == '-(b.d)*a^c + (b.c)*a^d + (a.d)*b^c - (a.c)*b^d'
        assert str((a | (b ^ c)) | (d ^ e)) == '(-(a.b)*(c.e) + (a.c)*(b.e))*d + ((a.b)*(c.d) - (a.c)*(b.d))*e'
    return
def test_derivatives_in_rectangular_coordinates():
    """grad applied to scalar/vector/bivector/full multivector fields (Cartesian)."""
    with GA_Printer():
        X = (x, y, z) = symbols('x y z')
        (ex, ey, ez, grad) = MV.setup('e_x e_y e_z', metric='[1,1,1]', coords=X)
        f = MV('f', 'scalar', fct=True)
        A = MV('A', 'vector', fct=True)
        B = MV('B', 'grade2', fct=True)
        C = MV('C', 'mv', fct=True)
        assert str(f) == 'f'
        assert str(A) == 'A__x*e_x + A__y*e_y + A__z*e_z'
        assert str(B) == 'B__xy*e_x^e_y + B__xz*e_x^e_z + B__yz*e_y^e_z'
        assert str(C) == 'C + C__x*e_x + C__y*e_y + C__z*e_z + C__xy*e_x^e_y + C__xz*e_x^e_z + C__yz*e_y^e_z + C__xyz*e_x^e_y^e_z'
        assert str(grad*f) == 'D{x}f*e_x + D{y}f*e_y + D{z}f*e_z'
        assert str(grad | A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
        assert str(grad*A) == 'D{x}A__x + D{y}A__y + D{z}A__z + (-D{y}A__x + D{x}A__y)*e_x^e_y + (-D{z}A__x + D{x}A__z)*e_x^e_z + (-D{z}A__y + D{y}A__z)*e_y^e_z'
        # -I*(grad^A) is the classical curl.
        assert str(-MV.I*(grad ^ A)) == '(-D{z}A__y + D{y}A__z)*e_x + (D{z}A__x - D{x}A__z)*e_y + (-D{y}A__x + D{x}A__y)*e_z'
        assert str(grad*B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z + (D{z}B__xy - D{y}B__xz + D{x}B__yz)*e_x^e_y^e_z'
        assert str(grad ^ B) == '(D{z}B__xy - D{y}B__xz + D{x}B__yz)*e_x^e_y^e_z'
        assert str(grad | B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z'
        assert str(grad < A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
        assert str(grad > A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
        assert str(grad < B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z'
        assert str(grad > B) == '0'
        assert str(grad < C) == 'D{x}C__x + D{y}C__y + D{z}C__z + (-(D{y}C__xy + D{z}C__xz))*e_x + (D{x}C__xy - D{z}C__yz)*e_y + (D{x}C__xz + D{y}C__yz)*e_z + D{z}C__xyz*e_x^e_y - D{y}C__xyz*e_x^e_z + D{x}C__xyz*e_y^e_z'
        assert str(grad > C) == 'D{x}C__x + D{y}C__y + D{z}C__z + D{x}C*e_x + D{y}C*e_y + D{z}C*e_z'
    return
def test_derivatives_in_spherical_coordinates():
    """grad in spherical coordinates via a curvilinear map (curv) to Cartesian."""
    with GA_Printer():
        X = (r, th, phi) = symbols('r theta phi')
        # curv = [coordinate map to Cartesian, scale factors (1, r, r*sin(theta))].
        curv = [[r*cos(phi)*sin(th), r*sin(phi)*sin(th), r*cos(th)], [1, r, r*sin(th)]]
        (er, eth, ephi, grad) = MV.setup('e_r e_theta e_phi', metric='[1,1,1]', coords=X, curv=curv)
        f = MV('f', 'scalar', fct=True)
        A = MV('A', 'vector', fct=True)
        B = MV('B', 'grade2', fct=True)
        assert str(f) == 'f'
        assert str(A) == 'A__r*e_r + A__theta*e_theta + A__phi*e_phi'
        assert str(B) == 'B__rtheta*e_r^e_theta + B__rphi*e_r^e_phi + B__thetaphi*e_theta^e_phi'
        assert str(grad*f) == 'D{r}f*e_r + D{theta}f/r*e_theta + D{phi}f/(r*sin(theta))*e_phi'
        assert str(grad | A) == 'D{r}A__r + 2*A__r/r + A__theta*cos(theta)/(r*sin(theta)) + D{theta}A__theta/r + D{phi}A__phi/(r*sin(theta))'
        assert str(-MV.I*(grad ^ A)) == '((A__phi*cos(theta)/sin(theta) + D{theta}A__phi - D{phi}A__theta/sin(theta))/r)*e_r + (-D{r}A__phi - A__phi/r + D{phi}A__r/(r*sin(theta)))*e_theta + (D{r}A__theta + A__theta/r - D{theta}A__r/r)*e_phi'
        assert str(grad ^ B) == '(D{r}B__thetaphi - B__rphi*cos(theta)/(r*sin(theta)) + 2*B__thetaphi/r - D{theta}B__rphi/r + D{phi}B__rtheta/(r*sin(theta)))*e_r^e_theta^e_phi'
    return
def test_rounding_numerical_components():
    """Nga() rounds the numeric coefficients of a multivector for printing."""
    with GA_Printer():
        (ex, ey, ez) = MV.setup('e_x e_y e_z', metric='[1,1,1]')
        X = 1.2*ex + 2.34*ey + 0.555*ez
        Y = 0.333*ex + 4*ey + 5.3*ez
        assert str(X) == '1.20000000000000*e_x + 2.34000000000000*e_y + 0.555000000000000*e_z'
        assert str(Nga(X, 2)) == '1.2*e_x + 2.3*e_y + 0.55*e_z'
        assert str(X*Y) == '12.7011000000000 + 4.02078000000000*e_x^e_y + 6.17518500000000*e_x^e_z + 10.1820000000000*e_y^e_z'
        assert str(Nga(X*Y, 2)) == '13. + 4.0*e_x^e_y + 6.2*e_x^e_z + 10.*e_y^e_z'
    return
def test_noneuclidian_distance_calculation():
    """Hyperbolic distance between conformal points X, Y (D&L-style derivation).

    Builds the rotor R = c + s*B/|B|, transports X, and reduces the scalar
    X'.Y via double-angle substitutions down to C = 1 - (X.Y)/((X.e)(Y.e)).
    """
    from sympy import solve, sqrt
    with GA_Printer():
        metric = '0 # #,# 0 #,# # 1'
        (X, Y, e) = MV.setup('X Y e', metric)
        assert str((X ^ Y)*(X ^ Y)) == '(X.Y)**2'
        L = X ^ Y ^ e
        B = L*e
        assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
        Bsq = B*B
        assert str(Bsq) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
        Bsq = Bsq.scalar()
        assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
        BeBr = B*e*B.rev()
        assert str(BeBr) == '((X.Y)*(-(X.Y) + 2*(X.e)*(Y.e)))*e'
        assert str(B*B) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
        assert str(L*L) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
        # Symbols that stand in for scalar subexpressions in the printed forms.
        (s, c, Binv, M, BigS, BigC, alpha, XdotY, Xdote, Ydote) = symbols('s c (1/B) M S C alpha (X.Y) (X.e) (Y.e)')
        Bhat = Binv*B
        R = c + s*Bhat
        assert str(R) == 'c + (1/B)*s*X^Y - (1/B)*(Y.e)*s*X^e + (1/B)*(X.e)*s*Y^e'
        Z = R*X*R.rev()
        Z.obj = expand(Z.obj)
        Z.obj = Z.obj.collect([Binv, s, c, XdotY])
        assert str(Z) == '((1/B)**2*(X.Y)**2*s**2 - 2*(1/B)**2*(X.Y)*(X.e)*(Y.e)*s**2 + 2*(1/B)*(X.Y)*c*s - 2*(1/B)*(X.e)*(Y.e)*c*s + c**2)*X + 2*(1/B)*(X.e)**2*c*s*Y + (2*(1/B)*(X.Y)*(X.e)*s*(-(1/B)*(X.Y)*s + 2*(1/B)*(X.e)*(Y.e)*s - c))*e'
        W = Z | Y
        # From this point forward all calculations are with sympy scalars
        W = W.scalar()
        assert str(W) == '(1/B)**2*(X.Y)**3*s**2 - 4*(1/B)**2*(X.Y)**2*(X.e)*(Y.e)*s**2 + 4*(1/B)**2*(X.Y)*(X.e)**2*(Y.e)**2*s**2 + 2*(1/B)*(X.Y)**2*c*s - 4*(1/B)*(X.Y)*(X.e)*(Y.e)*c*s + (X.Y)*c**2'
        W = expand(W)
        W = simplify(W)
        W = W.collect([s*Binv])
        M = 1/Bsq
        W = W.subs(Binv**2, M)
        W = simplify(W)
        Bmag = sqrt(XdotY**2 - 2*XdotY*Xdote*Ydote)
        W = W.collect([Binv*c*s, XdotY])
        #Double angle substitutions
        W = W.subs(2*XdotY**2 - 4*XdotY*Xdote*Ydote, 2/(Binv**2))
        W = W.subs(2*c*s, BigS)
        W = W.subs(c**2, (BigC + 1)/2)
        W = W.subs(s**2, (BigC - 1)/2)
        W = simplify(W)
        W = expand(W)
        W = W.subs(1/Binv, Bmag)
        assert str(W) == '(X.Y)*C - (X.e)*(Y.e)*C + (X.e)*(Y.e) + S*sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
        Wd = collect(W, [BigC, BigS], exact=True, evaluate=False)
        Wd_1 = Wd[S.One]
        Wd_C = Wd[BigC]
        Wd_S = Wd[BigS]
        assert str(Wd_1) == '(X.e)*(Y.e)'
        assert str(Wd_C) == '(X.Y) - (X.e)*(Y.e)'
        assert str(Wd_S) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
        assert str(Bmag) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
        Wd_1 = Wd_1.subs(Bmag, 1/Binv)
        Wd_C = Wd_C.subs(Bmag, 1/Binv)
        Wd_S = Wd_S.subs(Bmag, 1/Binv)
        # Square both sides to eliminate the radical, then solve for C.
        lhs = Wd_1 + Wd_C*BigC
        rhs = -Wd_S*BigS
        lhs = lhs**2
        rhs = rhs**2
        W = expand(lhs - rhs)
        W = expand(W.subs(1/Binv**2, Bmag**2))
        W = expand(W.subs(BigS**2, BigC**2 - 1))
        W = W.collect([BigC, BigC**2], evaluate=False)
        a = simplify(W[BigC**2])
        b = simplify(W[BigC])
        c = simplify(W[S.One])
        assert str(a) == '(X.e)**2*(Y.e)**2'
        assert str(b) == '2*(X.e)*(Y.e)*((X.Y) - (X.e)*(Y.e))'
        assert str(c) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e) + (X.e)**2*(Y.e)**2'
        x = Symbol('x')
        C = solve(a*x**2 + b*x + c, x)[0]
        assert str(expand(simplify(expand(C)))) == '-(X.Y)/((X.e)*(Y.e)) + 1'
    return
def test_conformal_representations_of_circles_lines_spheres_and_planes():
    """Conformal-model test: geometric objects represented as blades.

    Uses the 5D conformal metric where n and nbar are null vectors with
    n.nbar = 2; points are mapped up with make_vector (F mapping).  The
    expected strings are golden values.
    """
    global n, nbar
    with GA_Printer():
        metric = '1 0 0 0 0,0 1 0 0 0,0 0 1 0 0,0 0 0 0 2,0 0 0 2 0'
        (e1, e2, e3, n, nbar) = MV.setup('e_1 e_2 e_3 n nbar', metric)
        e = n + nbar
        # conformal representation of points
        A = make_vector(e1)
        B = make_vector(e2)
        C = make_vector(-e1)
        D = make_vector(e3)
        X = make_vector('x', 3)
        assert str(A) == 'e_1 + 1/2*n - 1/2*nbar'
        assert str(B) == 'e_2 + 1/2*n - 1/2*nbar'
        assert str(C) == '-e_1 + 1/2*n - 1/2*nbar'
        assert str(D) == 'e_3 + 1/2*n - 1/2*nbar'
        assert str(X) == 'x1*e_1 + x2*e_2 + x3*e_3 + ((x1**2 + x2**2 + x3**2)/2)*n - 1/2*nbar'
        # Circle through A, B, C; line through A, B; sphere; plane.
        assert str((A ^ B ^ C ^ X)) == '-x3*e_1^e_2^e_3^n + x3*e_1^e_2^e_3^nbar + ((x1**2 + x2**2 + x3**2 - 1)/2)*e_1^e_2^n^nbar'
        assert str((A ^ B ^ n ^ X)) == '-x3*e_1^e_2^e_3^n + ((x1 + x2 - 1)/2)*e_1^e_2^n^nbar + x3/2*e_1^e_3^n^nbar - x3/2*e_2^e_3^n^nbar'
        assert str((((A ^ B) ^ C) ^ D) ^ X) == '((-x1**2 - x2**2 - x3**2 + 1)/2)*e_1^e_2^e_3^n^nbar'
        assert str((A ^ B ^ n ^ D ^ X)) == '((-x1 - x2 - x3 + 1)/2)*e_1^e_2^e_3^n^nbar'
        L = (A ^ B ^ e) ^ X
        assert str(L) == '-x3*e_1^e_2^e_3^n - x3*e_1^e_2^e_3^nbar + (-x1**2/2 + x1 - x2**2/2 + x2 - x3**2/2 - 1/2)*e_1^e_2^n^nbar + x3*e_1^e_3^n^nbar - x3*e_2^e_3^n^nbar'
    return
def test_properties_of_geometric_objects():
    """Check direction extraction from conformal lines and circles.

    delta = ((obj ^ n) | n) | nbar recovers (twice) the direction/moment
    bivector of the object built from the conformal points P1, P2, P3.
    """
    with GA_Printer():
        metric = '# # # 0 0,' + \
                 '# # # 0 0,' + \
                 '# # # 0 0,' + \
                 '0 0 0 0 2,' + \
                 '0 0 0 2 0'
        (p1, p2, p3, n, nbar) = MV.setup('p1 p2 p3 n nbar', metric)
        # F maps the Euclidean points up to the conformal model.
        P1 = F(p1, n, nbar)
        P2 = F(p2, n, nbar)
        P3 = F(p3, n, nbar)
        # Line through P1 and P2: direction is p1 - p2 (up to the factor 2).
        L = P1 ^ P2 ^ n
        delta = (L | n) | nbar
        assert str(delta) == '2*p1 - 2*p2'
        # Circle through P1, P2, P3: extract its plane bivector.
        C = P1 ^ P2 ^ P3
        delta = ((C ^ n) | n) | nbar
        assert str(delta) == '2*p1^p2 - 2*p1^p3 + 2*p2^p3'
        assert str((p2 - p1) ^ (p3 - p1)) == 'p1^p2 - p1^p3 + p2^p3'
    return
def test_extracting_vectors_from_conformal_2_blade():
    """Recover the two null points P1, P2 from the 2-blade B = P1 ^ P2.

    Ap/Am project a generic vector a onto the two null directions of B;
    both must square to zero.  Expected strings are golden values.
    """
    with GA_Printer():
        # P1.P1 = P2.P2 = 0 and P1.P2 = -1 (two null points).
        metric = ' 0 -1 #,' + \
                 '-1 0 #,' + \
                 ' # # #,'
        (P1, P2, a) = MV.setup('P1 P2 a', metric)
        B = P1 ^ P2
        Bsq = B*B
        assert str(Bsq) == '1'
        # Reject of a from B (component of a in the plane of B).
        ap = a - (a ^ B)*B
        assert str(ap) == '-(P2.a)*P1 - (P1.a)*P2'
        Ap = ap + ap*B
        Am = ap - ap*B
        assert str(Ap) == '-2*(P2.a)*P1'
        assert str(Am) == '-2*(P1.a)*P2'
        # Both extracted directions are null.
        assert str(Ap*Ap) == '0'
        assert str(Am*Am) == '0'
        aB = a | B
        assert str(aB) == '-(P2.a)*P1 + (P1.a)*P2'
    return
def test_reciprocal_frame_test():
    """Verify the reciprocal frame {E1, E2, E3} of a general 3D frame.

    For a frame with symbolic metric entries, the reciprocal vectors must
    satisfy orthogonality (Ei | ej == 0 for i != j) and normalization
    ((Ei | ei) / E**2 == 1).  The printed forms of E1..E3 are golden values.
    """
    with GA_Printer():
        # Unit-norm frame vectors with symbolic mutual dot products.
        metric = '1 # #,# 1 #,# # 1,'
        (e1, e2, e3) = MV.setup('e1 e2 e3', metric)
        E = e1 ^ e2 ^ e3
        Esq = (E*E).scalar()
        assert str(E) == 'e1^e2^e3'
        assert str(Esq) == '(e1.e2)**2 - 2*(e1.e2)*(e1.e3)*(e2.e3) + (e1.e3)**2 + (e2.e3)**2 - 1'
        Esq_inv = 1/Esq
        E1 = (e2 ^ e3)*E
        E2 = (-1)*(e1 ^ e3)*E
        E3 = (e1 ^ e2)*E
        assert str(E1) == '((e2.e3)**2 - 1)*e1 + ((e1.e2) - (e1.e3)*(e2.e3))*e2 + (-(e1.e2)*(e2.e3) + (e1.e3))*e3'
        assert str(E2) == '((e1.e2) - (e1.e3)*(e2.e3))*e1 + ((e1.e3)**2 - 1)*e2 + (-(e1.e2)*(e1.e3) + (e2.e3))*e3'
        assert str(E3) == '(-(e1.e2)*(e2.e3) + (e1.e3))*e1 + (-(e1.e2)*(e1.e3) + (e2.e3))*e2 + ((e1.e2)**2 - 1)*e3'
        # Off-diagonal contractions must vanish.
        for recip, base in ((E1, e2), (E1, e3),
                            (E2, e1), (E2, e3),
                            (E3, e1), (E3, e2)):
            assert str((recip | base).expand()) == '0'
        # Diagonal contractions must equal E**2, i.e. the ratio is 1.
        Esq = expand(Esq)
        for recip, base in ((E1, e1), (E2, e2), (E3, e3)):
            w = ((recip | base).expand()).scalar()
            assert str(simplify(w/Esq)) == '1'
    return
| bsd-3-clause |
luzheqi1987/nova-annotation | nova/tests/unit/integrated/v3/test_remote_consoles.py | 1 | 3052 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.unit.integrated.v3 import test_servers
class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
    """API sample tests for the os-remote-consoles server actions."""

    extension_name = "os-remote-consoles"

    # URL-matching regexes for the console URLs in the sample responses.
    # A single copy per scheme replaces the literal that used to be
    # duplicated verbatim in every test method.
    HTTP_URL_RE = "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
    WS_URL_RE = "((ws?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"

    def setUp(self):
        super(ConsolesSampleJsonTests, self).setUp()
        # Every console protocol must be enabled for the samples to work.
        self.flags(vnc_enabled=True)
        self.flags(enabled=True, group='spice')
        self.flags(enabled=True, group='rdp')
        self.flags(enabled=True, group='serial_console')

    def _check_console(self, action, template_base, url_re):
        """Boot a server, invoke *action* on it and verify the API samples.

        *template_base* names the ``<base>-post-req``/``<base>-post-resp``
        sample templates; *url_re* validates the console URL in the response.
        """
        uuid = self._post_server()
        response = self._do_post('servers/%s/action' % uuid,
                                 '%s-post-req' % template_base,
                                 {'action': action})
        subs = self._get_regexes()
        subs["url"] = url_re
        self._verify_response('%s-post-resp' % template_base, subs,
                              response, 200)

    def test_get_vnc_console(self):
        self._check_console('os-getVNCConsole', 'get-vnc-console',
                            self.HTTP_URL_RE)

    def test_get_spice_console(self):
        self._check_console('os-getSPICEConsole', 'get-spice-console',
                            self.HTTP_URL_RE)

    def test_get_rdp_console(self):
        self._check_console('os-getRDPConsole', 'get-rdp-console',
                            self.HTTP_URL_RE)

    def test_get_serial_console(self):
        self._check_console('os-getSerialConsole', 'get-serial-console',
                            self.WS_URL_RE)
| apache-2.0 |
nightlydash/darkcoin | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Shim mimicking the PyCrypto-style hash API: SHA256.new(...) is simply
    # hashlib.sha256(...), so the code below can stay source-compatible.
    new = hashlib.sha256
if str != bytes:
    # Python 3.x: iterating bytes yields ints, so redefine ord/chr at module
    # scope (deliberately shadowing the builtins) to keep the byte-oriented
    # base58 code below working on both major Python versions.
    def ord(c):
        return c

    def chr(n):
        return bytes((n,))
# Base58 alphabet (Bitcoin variant: no 0, O, I, l).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars


def b58encode(v):
    """encode v, which is a string of bytes, to base58.

    Accepts Python 2 byte strings as well as Python 3 bytes/bytearray.
    Previously, iterating Python 3 bytes yielded ints, so the value was
    only handled via the module-level ord shim and the leading-zero
    compression (`c == '\\0'`) never matched; both are fixed here.
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        if not isinstance(c, int):
            c = ord(c)  # str input (py2 bytes or py3 text) yields characters
        long_value += (256 ** i) * c

    result = ''
    while long_value >= __b58base:
        long_value, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
    result = __b58chars[long_value] + result

    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        if c == '\0' or c == 0:  # 0 covers Python 3 bytes iteration
            nPad += 1
        else:
            break
    return (__b58chars[0] * nPad) + result


def b58decode(v, length=None):
    """decode v into a string of len bytes

    Returns None when *length* is given and the decoded payload has a
    different size.  Builds the output with a bytearray instead of the
    shadowed chr shim, so the function works identically on Python 2
    (returning str) and Python 3 (returning bytes).

    NOTE(review): characters outside the alphabet make find() return -1
    and silently corrupt the value — behavior kept from the original.
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base ** i)

    # Emit base-256 digits, least significant first, then reverse.
    digits = bytearray()
    while long_value >= 256:
        long_value, mod = divmod(long_value, 256)
        digits.append(mod)
    digits.append(long_value)
    digits.reverse()

    # Undo the leading-zero compression: each leading '1' is a zero byte.
    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break

    result = bytes(bytearray(nPad) + digits)
    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return 32-bit checksum based on SHA256.

    The checksum is the first four bytes of double-SHA256 of *v*
    (Base58Check convention).  Calls hashlib directly instead of going
    through the PyCrypto-style SHA256 shim class.
    """
    return hashlib.sha256(hashlib.sha256(v).digest()).digest()[0:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """decode a base58 string, check and remove checksum

    Returns the payload without its 4-byte checksum, or None when the
    string does not decode or the checksum does not match.
    """
    result = b58decode(v)
    if result is None:
        return None
    # Compute the checksum once and reuse it; it used to be computed twice,
    # with the first result bound to an unused variable (h3).
    expected = checksum(result[:-4])
    if result[-4:] == expected:
        return result[:-4]
    return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    decoded = b58decode_chk(strAddress)
    # A valid address payload is exactly 21 bytes: 1 version byte + 20-byte hash.
    if decoded is None or len(decoded) != 21:
        return None
    return ord(decoded[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # Use ==, not `is`: identity comparison with an int literal only works
    # by accident of CPython's small-int caching (SyntaxWarning on 3.8+).
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| mit |
AlexandraMercier/StrategyIA | ai/Algorithm/PathfinderRRT.py | 3 | 13347 | # Under MIT License, see LICENSE.txt
"""
Module intelligent contenant l'implementation d'un Rapidly exploring Random
Tree. Le module contient une classe qui peut être instanciée et qui calcule
les trajectoires des robots de l'équipe. Les détails de l'algorithme sont
disponibles sur la page wikipedia. Code original http://myenigma.hatenablog.com
/entry/2016/03/23/092002
"""
# FIXME IMPORT!
import copy
import math
import random
import time
from RULEngine.Debug.debug_interface import COLOR_ID_MAP, DEFAULT_PATH_TIMEOUT
from RULEngine.Util.Pose import Pose
from RULEngine.Util.Position import Position
from ai.Algorithm.IntelligentModule import Pathfinder
OBSTACLE_DEAD_ZONE = 700  # clearance radius (mm) kept around each robot obstacle
TIME_TO_UPDATE = 1  # seconds allotted to one RRT planning pass
class PathfinderRRT(Pathfinder):
    """RRT-based pathfinder for the team's robots.

    Inherits from Pathfinder (IntelligentModule) for its `state` property.
    get_path() computes a smoothed trajectory for one robot; every other
    robot on the field (both teams) is treated as a circular obstacle.
    Paths are also stored by side effect in the GameState.
    """

    def __init__(self, p_worldstate):
        """Keep a reference on the world state (via the parent constructor)
        and prepare one empty path per robot id (0-5).

        :param p_worldstate: reference on the world state / InfoManager
        """
        super().__init__(p_worldstate)
        self.paths = {i: [] for i in range(6)}
        self.last_timestamp = self.ws.game_state.get_timestamp()

    # Kept to conform to the new Pathfinder interface; to be implemented.
    # TODO(mgl): change this please!
    def get_next_point(self, robot_id=None):
        pass

    def update(self):
        pass

    def draw_path(self, path, pid=0):
        """Send the path's waypoints to the debug interface for display."""
        points = [(path_element.position.x, path_element.position.y)
                  for path_element in path]
        self.debug_interface.add_multiple_points(points, COLOR_ID_MAP[pid], width=5, link="path - " + str(pid),
                                                 timeout=DEFAULT_PATH_TIMEOUT)

    def get_path(self, pid=None, target=None):
        """
        Return the robot's trajectory.

        :param pid: robot id, 0 to 5.
        :param target: target Pose for the robot.
        :return: a list of Pose, [Pose]
        """
        assert isinstance(pid, int), "Un pid doit être passé"
        assert isinstance(target, Pose), "La cible doit être une Pose"
        return self._compute_path(pid, target)

    def _compute_path(self, pid, target):
        """
        Compute the trajectory for one robot.

        :param pid: robot id, 0 to 5.
        :param target: target Pose for the robot.
        :return: a list of Pose
        """
        # TODO: add the goals to the obstacle list
        teammate_pids = list(range(6))
        opponent_pids = list(range(6))
        teammate_pids.remove(pid)

        obstacleList = []
        for mate_pid in teammate_pids:
            # TODO info manager: change get_player_position
            position = self.ws.game_state.get_player_pose(mate_pid).position
            obstacleList.append([position.x, position.y, OBSTACLE_DEAD_ZONE])

        initial_position_of_main_player = self.ws.game_state.get_player_pose(pid).position

        # BUG FIX: this loop used to rebind `pid`, clobbering the parameter
        # that is still needed in the AttributeError fallback below.
        for opponent_pid in opponent_pids:
            position = self.ws.game_state.get_player_pose(opponent_pid, False).position
            obstacleList.append([position.x, position.y, OBSTACLE_DEAD_ZONE])

        target_position_of_player = target.position
        target_orientation_of_player = target.orientation
        assert isinstance(target_position_of_player, Position), "La cible du joueur doit être une Position"
        try:
            # Fall back to the robot's current position if the target does
            # not expose x/y coordinates.
            target_position_of_player.x
            target_position_of_player.y
        except AttributeError:
            target_position_of_player = self.ws.game_state.get_player_pose(pid).position

        rrt = RRT(start=[initial_position_of_main_player.x,
                         initial_position_of_main_player.y],
                  goal=[target_position_of_player.x, target_position_of_player.y],
                  obstacleList=obstacleList,
                  # TODO check whether the robot may leave the field
                  rand_area=[-4500, 4500],
                  expand_dis=get_expand_dis([initial_position_of_main_player.x,
                                             initial_position_of_main_player.y],
                                            [target_position_of_player.x, target_position_of_player.y]),
                  goal_sample_rate=get_goal_sample_rate([initial_position_of_main_player.x,
                                                        initial_position_of_main_player.y],
                                                       [target_position_of_player.x, target_position_of_player.y]))
        not_smoothed_path = rrt.planning(obstacleList)

        # Path smoothing
        maxIter = 100
        # The smoothed path comes goal-first: reverse it and drop the
        # start point before converting to poses.
        smoothed_path = path_smoothing(not_smoothed_path, maxIter, obstacleList)
        smoothed_path = list(reversed(smoothed_path[:-1]))
        return self._smoothed_path_to_pose_list(smoothed_path, target_orientation_of_player)

    def _smoothed_path_to_pose_list(self, smoothed_path, target_orientation):
        """Convert [x, y] waypoints into Pose objects carrying the target orientation."""
        return [Pose(Position(point[0], point[1]), target_orientation)
                for point in smoothed_path]
class RRT():
    """
    Rapidly-exploring Random Tree planner (main pathfinder class).

    start: start position [x, y]
    goal: destination [x, y]
    obstacleList: obstacle positions and sizes [[x, y, size], ...]
    rand_area: random sampling interval [min, max]
    expand_dis: edge length between tree nodes
    goal_sample_rate: probability (%) of sampling the goal directly;
        improves the RRT's convergence speed
    max_iteration: iteration count for the path smoother
    """

    def __init__(self, start, goal, obstacleList, rand_area, expand_dis, goal_sample_rate, max_iteration=50):
        self.start = Node(start[0], start[1])
        self.end = Node(goal[0], goal[1])
        self.minrand = rand_area[0]
        self.maxrand = rand_area[1]
        self.expand_dis = expand_dis
        self.goal_sample_rate = goal_sample_rate
        self.max_iteration = max_iteration

    def planning(self, obstacleList):
        """Grow the tree and return the path as [[x, y], ...], goal first."""
        initial_time = time.time()
        self.node_list = [self.start]
        # Time-boxed search (the redundant `while True and ...` was removed).
        # TODO: replace the time-based bail-out with a proper iteration cap.
        while time.time() - initial_time < TIME_TO_UPDATE:
            # Random sampling: occasionally pick the goal itself.
            if random.randint(0, 100) > self.goal_sample_rate:
                random_coordinates = [random.uniform(self.minrand, self.maxrand),
                                      random.uniform(self.minrand, self.maxrand)]
            else:
                random_coordinates = [self.end.x, self.end.y]

            # Find the nearest existing node.
            nind = self.get_nearest_list_index(self.node_list, random_coordinates)

            # Expand the tree one edge towards the sample.
            nearest_node = self.node_list[nind]
            theta = math.atan2(random_coordinates[1] - nearest_node.y, random_coordinates[0] - nearest_node.x)

            new_node = copy.deepcopy(nearest_node)
            new_node.x += self.expand_dis * math.cos(theta)
            new_node.y += self.expand_dis * math.sin(theta)
            new_node.parent = nind

            if not self.__collision_check(new_node, obstacleList):
                continue

            self.node_list.append(new_node)

            # Close enough to the goal?
            dx = new_node.x - self.end.x
            dy = new_node.y - self.end.y
            d = math.sqrt(dx * dx + dy * dy)
            if d <= self.expand_dis:
                break

        # Walk the parent links back from the last node to the start.
        path = [[self.end.x, self.end.y]]
        last_index = len(self.node_list) - 1
        while self.node_list[last_index].parent is not None:
            node = self.node_list[last_index]
            path.append([node.x, node.y])
            last_index = node.parent
        path.append([self.start.x, self.start.y])

        # Timeout fallback: degenerate "stay in place" path.  Uses the same
        # TIME_TO_UPDATE constant as the loop condition (it used to
        # hard-code 1, which would silently diverge if the constant changed).
        if time.time() - initial_time >= TIME_TO_UPDATE:
            path = [[self.start.x, self.start.y], [self.start.x, self.start.y]]
        return path

    def get_nearest_list_index(self, node_list, rnd):
        """Index of the node in *node_list* closest (squared distance) to rnd."""
        dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2 for node in node_list]
        return dlist.index(min(dlist))

    def __collision_check(self, node, obstacleList):
        """Return False when the node lies inside an obstacle's radius."""
        for (ox, oy, size) in obstacleList:
            dx = ox - node.x
            dy = oy - node.y
            d = math.sqrt(dx * dx + dy * dy)
            if d <= size:
                return False  # collision
        return True  # safe
class Node():
    """A single RRT tree node: a 2D point plus a link to its parent.

    ``parent`` holds the index of the parent node inside the planner's
    node list; it stays None for the root until the tree links it in.
    """

    def __init__(self, x, y):
        self.x, self.y, self.parent = x, y, None
def get_expand_dis(start, goal):
    """Edge length for the RRT, scaled to the start-goal distance.

    Short trips get half the remaining distance (precision), long trips a
    fixed 300 mm edge (performance).  Non-numeric coordinates are treated
    as a zero-length trip.
    """
    try:
        distance = math.sqrt((goal[0] - start[0]) ** 2 + (goal[1] - start[1]) ** 2)
    # TODO: decide how to really handle missing coordinates
    except TypeError:
        distance = 0
    if distance < 600:
        return distance / 2
    return 300
def get_goal_sample_rate(start, goal):
    """Goal-sampling probability (%), scaled to the start-goal distance.

    Close targets get a steeply increasing bias towards sampling the goal
    directly; far targets use a fixed 30 %.  Non-numeric coordinates fall
    back to a conservative 5 %.
    """
    try:
        distance = math.sqrt((goal[0] - start[0]) ** 2 + (goal[1] - start[1]) ** 2)
    except TypeError:
        return 5
    if distance < 600:
        return (10 - distance / 140) ** 2
    return 30
def get_path_length(path):
    """Total Euclidean arc length of *path* (a list of [x, y] points).

    Returns 0 for an empty path; a non-indexable argument is silently
    treated as length 0 (behavior kept from the original).
    """
    total = 0
    try:
        for current, following in zip(path, path[1:]):
            dx = following[0] - current[0]
            dy = following[1] - current[1]
            total += math.sqrt(dx * dx + dy * dy)
    except TypeError:
        pass
    return total
def get_target_point(path, targetL):
    """Return [x, y, ti]: the point at arc length *targetL* along *path*.

    ti is the index of the segment start used for the interpolation; it is
    consumed by path_smoothing's slicing.
    """
    l = 0
    ti = 0
    last_pair_len = 0
    for i in range(len(path) - 1):
        dx = path[i + 1][0] - path[i][0]
        dy = path[i + 1][1] - path[i][1]
        d = math.sqrt(dx * dx + dy * dy)
        l += d
        if l >= targetL:
            # NOTE(review): ti = i - 1 (not i) looks off by one, but
            # path_smoothing's splicing relies on it — confirm before changing.
            ti = i-1
            last_pair_len = d
            break

    try:
        partRatio = (l - targetL) / last_pair_len
    except ZeroDivisionError:
        # targetL beyond the path end (or degenerate segment): clamp ratio.
        partRatio = 0

    # Linear interpolation within the selected segment.
    x = path[ti][0] + (path[ti + 1][0] - path[ti][0]) * partRatio
    y = path[ti][1] + (path[ti + 1][1] - path[ti][1]) * partRatio

    return [x, y, ti]
def line_collision_check(first, second, obstacleList):
    """
    Return True when the segment between *first* and *second* stays clear
    of every obstacle, False otherwise.

    Each obstacle is (ox, oy, size); clearance is the point-to-line
    distance for the infinite line through the two points.
    """
    # Line equation a*x + b*y + c = 0 through the two points.
    x1 = first[0]
    y1 = first[1]
    x2 = second[0]
    y2 = second[1]

    a = y2 - y1
    b = -(x2 - x1)
    c = y2 * (x2 - x1) - x2 * (y2 - y1)

    # BUG FIX: the old ZeroDivisionError handler wrapped the subtractions
    # above (which cannot divide), while the actual division below crashed
    # whenever the two points coincide.  Guard the degenerate segment
    # explicitly and report it as a collision, as the handler intended.
    norm = math.sqrt(a * a + b * b)
    if norm == 0:
        return False

    for (ox, oy, size) in obstacleList:
        d = abs(a * ox + b * oy + c) / norm
        if d <= size:
            return False

    return True  # OK
def path_smoothing(path, maxIter, obstacleList):
    # Slightly slows the pipeline down; see whether it can be improved.
    """Randomized shortcut smoothing of the RRT path.

    Repeatedly samples two positions along the path (by arc length) and,
    when the straight segment between them is collision free, splices it
    in, shortening the path.
    """
    path_length = get_path_length(path)

    for i in range(maxIter):
        # Sample two arc-length positions, ordered.
        pick_points = [random.uniform(0, path_length), random.uniform(0, path_length)]
        pick_points.sort()
        first = get_target_point(path, pick_points[0])
        second = get_target_point(path, pick_points[1])

        # Skip degenerate picks: bad indices, out of range, or both points
        # on the same segment.
        if first[2] <= 0 or second[2] <= 0:
            continue

        if (second[2]+1) > len(path):
            continue

        if second[2] == first[2]:
            continue

        # collision check
        if not line_collision_check(first, second, obstacleList):
            continue

        # Create New path: keep the prefix, insert the shortcut endpoints,
        # keep the suffix, then refresh the cached length.
        new_path = []
        new_path.extend(path[:first[2]+1])
        new_path.append([first[0], first[1]])
        new_path.append([second[0], second[1]])
        new_path.extend(path[second[2]+1:])
        path = new_path
        path_length = get_path_length(path)

    return path
# taille terrain = 9000 x 6000
| mit |
myriadcoin/myriadcoin | test/functional/mining_getblocktemplate_longpoll.py | 9 | 3316 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
    """Thread that issues a blocking getblocktemplate long-poll request."""

    def __init__(self, node):
        threading.Thread.__init__(self)
        # query current longpollid
        template = node.getblocktemplate({'rules': ['segwit']})
        self.longpollid = template['longpollid']
        # create a new connection to the node, we can't use the same
        # connection from two threads
        self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)

    def run(self):
        # Blocks until the node's block template changes (or times out).
        self.node.getblocktemplate({'longpollid': self.longpollid, 'rules': ['segwit']})
class GetBlockTemplateLPTest(BitcoinTestFramework):
    """Functional test for getblocktemplate long-polling behaviour."""

    def set_test_params(self):
        self.num_nodes = 2

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.nodes[0].generate(10)
        template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
        longpollid = template['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']})
        assert(template2['longpollid'] == longpollid)

        # Test 1: test that the long poll waits if we do nothing
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(thr.is_alive())

        # Test 2: test that longpoll will terminate if another node generates a block
        self.nodes[1].generate(1)  # generate a block on another node
        # check that thread will exit now that a new block was mined on the network
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1)  # generate a block on our own node
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        # min_relay_fee is fee per 1000 bytes, which should be more than enough.
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert(not thr.is_alive())
if __name__ == '__main__':
    # Allow running the functional test directly, outside the test runner.
    GetBlockTemplateLPTest().main()
| mit |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/numpy/testing/tests/test_doctesting.py | 224 | 1322 | """ Doctests for NumPy-specific nose/doctest modifications
"""
from __future__ import division, absolute_import, print_function
# try the #random directive on the output line
def check_random_directive():
    # The docstring below IS the test payload for numpy's doctest plugin
    # (the #random directive must make the mismatch acceptable); do not
    # edit the expected-output line.
    '''
    >>> 2+2
    <BadExample object at 0x084D05AC> #random: may vary on your system
    '''
# check the implicit "import numpy as np"
def check_implicit_np():
    # The numpy doctest plugin injects "import numpy as np" implicitly;
    # this docstring-doctest only passes when that injection works.
    '''
    >>> np.array([1,2,3])
    array([1, 2, 3])
    '''
# there's some extraneous whitespace around the correct responses
def check_whitespace_enabled():
    # The plugin enables whitespace normalization, so the stray spaces
    # around the expected outputs below must be tolerated; keep them.
    '''
    # whitespace after the 3
    >>> 1+2
    3 

    # whitespace before the 7
    >>> 3+4
     7
    '''
def check_empty_output():
    # Regression guard: a doctest line with no output must not error.
    """ Check that no output does not cause an error.

    This is related to nose bug 445; the numpy plugin changed the
    doctest-result-variable default and therefore hit this bug:
    http://code.google.com/p/python-nose/issues/detail?id=445

    >>> a = 10
    """
def check_skip():
    # The 1/0 below would raise; the +SKIP directive must prevent it from
    # ever being executed.
    """ Check skip directive

    The test below should not run

    >>> 1/0 #doctest: +SKIP
    """
if __name__ == '__main__':
    # Run tests outside numpy test rig
    import nose
    from numpy.testing.noseclasses import NumpyDoctest
    # Force-enable the numpy doctest plugin so the fixtures above execute.
    argv = ['', __file__, '--with-numpydoctest']
    nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()])
| mit |
liuziyan/cf-php-build-pack | lib/build_pack_utils/detecter.py | 49 | 2523 | import os
import re
import logging
from itertools import chain
class BaseFileSearch(object):
    """Scan a directory and report whether any entry matches.

    Subclasses override ``_match``.  ``recursive`` switches between a
    flat listing and a full os.walk; ``fullPath`` matches against the
    joined path instead of the bare entry name.
    """

    def __init__(self):
        self._log = logging.getLogger('detecter')
        self.recursive = False  # search subdirectories too
        self.fullPath = False   # match against the full path, not just the name

    def _match(self, term):
        """Base implementation matches everything; subclasses override."""
        return True

    def search(self, root):
        """Return True when some entry under *root* matches, else False."""
        if self.recursive:
            self._log.debug("Recursively search [%s]", root)
            for head, dirs, files in os.walk(root):
                for name in chain(dirs, files):
                    if self.fullPath:
                        name = os.path.join(head, name)
                    if self._match(name):
                        self._log.debug("File [%s] matched.", name)
                        return True
                    self._log.debug("File [%s] didn't match.", name)
            return False
        else:
            self._log.debug("Searching [%s]", root)
            for name in os.listdir(root):
                if self.fullPath:
                    name = os.path.join(root, name)
                if self._match(name):
                    self._log.debug("File [%s] matched.", name)
                    return True
                self._log.debug("File [%s] didn't match.", name)
            # BUG FIX: this branch used to fall through and return None,
            # unlike the recursive branch; return an explicit False.
            return False
class TextFileSearch(BaseFileSearch):
    """Matches entries exactly equal to a fixed string."""

    def __init__(self, text):
        BaseFileSearch.__init__(self)
        self._text = text

    def _match(self, term):
        # No configured text -> no verdict (None), mirroring the other searches.
        if not self._text:
            return None
        return term == self._text
class RegexFileSearch(BaseFileSearch):
    """Matches entries against a regular expression."""

    def __init__(self, regex):
        BaseFileSearch.__init__(self)
        # Accept either a pattern string (duck-typed via 'strip') or an
        # already-compiled regex object.
        self._regex = re.compile(regex) if hasattr(regex, 'strip') else regex

    def _match(self, term):
        if not self._regex:
            return None
        return self._regex.match(term) is not None
class StartsWithFileSearch(BaseFileSearch):
    """Matches entries beginning with a fixed prefix."""

    def __init__(self, start):
        BaseFileSearch.__init__(self)
        self._start = start

    def _match(self, term):
        if not self._start:
            return None
        return term.startswith(self._start)
class EndsWithFileSearch(BaseFileSearch):
    """Matches entries ending with a fixed suffix."""

    def __init__(self, end):
        BaseFileSearch.__init__(self)
        self._end = end

    def _match(self, term):
        if not self._end:
            return None
        return term.endswith(self._end)
class ContainsFileSearch(BaseFileSearch):
    """Matches entries containing a fixed substring."""

    def __init__(self, contains):
        BaseFileSearch.__init__(self)
        self._contains = contains

    def _match(self, term):
        if not self._contains:
            return None
        return self._contains in term
| apache-2.0 |
Ms2ger/presto-testo | wpt/websockets/mod_pywebsocket/memorizingfile.py | 7 | 3111 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Memorizing file.
A memorizing file wraps a file and memorizes lines read by readline.
"""
import sys
class MemorizingFile(object):
    """MemorizingFile wraps a file and memorizes lines read by readline.

    Note that data read by other methods are not memorized. This behavior
    is good enough for memorizing lines SimpleHTTPServer reads before
    the control reaches WebSocketRequestHandler.
    """

    def __init__(self, file_, max_memorized_lines=sys.maxint):
        """Construct an instance.

        Args:
            file_: the file object to wrap.
            max_memorized_lines: the maximum number of lines to memorize.
                Only the first max_memorized_lines are memorized.
                Default: sys.maxint.
        """
        # NOTE(review): sys.maxint exists only on Python 2 (this module
        # predates Python 3); switch to sys.maxsize if it is ever ported.
        self._file = file_
        self._memorized_lines = []
        self._max_memorized_lines = max_memorized_lines

    def __getattribute__(self, name):
        # Proxy pattern: the wrapper's own attributes and the two public
        # methods are served locally; every other attribute access is
        # delegated to the wrapped file object.
        if name in ('_file', '_memorized_lines', '_max_memorized_lines',
                    'readline', 'get_memorized_lines'):
            return object.__getattribute__(self, name)
        return self._file.__getattribute__(name)

    def readline(self):
        """Override file.readline and memorize the line read."""
        line = self._file.readline()
        # Empty string means EOF; only non-empty lines are recorded, and
        # only up to the configured cap.
        if line and len(self._memorized_lines) < self._max_memorized_lines:
            self._memorized_lines.append(line)
        return line

    def get_memorized_lines(self):
        """Get lines memorized so far."""
        return self._memorized_lines
# vi:sts=4 sw=4 et
| bsd-3-clause |
kasioumis/invenio | invenio/modules/formatter/format_elements/bfe_plots.py | 13 | 3661 | # -*- coding: utf-8 -*-
#
# $Id: bfe_CERN_plots.py,v 1.3 2009/03/17 10:55:15 jerome Exp $
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Display image of the plot if we are in selected plots collection
"""
from invenio.legacy.bibdocfile.api import BibRecDocs
from invenio.utils.url import create_html_link
from invenio.config import CFG_SITE_RECORD
from invenio.base.i18n import gettext_set_language
try:
from invenio.config import CFG_BASE_URL
except ImportError:
from invenio.config import CFG_SITE_URL
CFG_BASE_URL = CFG_SITE_URL
def format_element(bfo, width="", caption="yes", max_plots="3"):
    """
    Display image of the plot if we are in selected plots collections

    To achieve this, we take the pngs associated with this document

    @param width: the width of the returned image (Eg: '100px')
    @param caption: display the captions or not?
        NOTE(review): currently unused by the implementation.
    @param max_plots: the maximum number of plots to display (-1 is all plots)
    """
    _ = gettext_set_language(bfo.lang)

    try:
        max_plots = int(max_plots)
    except ValueError:
        # Someone tried to squeeze in something non-numerical. Hah!
        # Fall back to the documented default.
        max_plots = 3

    link = ""
    bibarchive = BibRecDocs(bfo.recID)
    if width != "":
        width = 'width="%s"' % width

    img_files = []
    for doc in bibarchive.list_bibdocs(doctype="Plot"):
        for _file in doc.list_latest_files():
            if _file.subformat == "context":
                # Ignore context files
                continue
            # Description layout: 5-digit plot index followed by the caption.
            # (Fetched once; it used to be queried twice per file.)
            description = _file.get_description()
            caption_text = description[5:]
            index = int(description[:5])
            img_location = _file.get_url()

            img = '<img style="vertical-align:middle;" src="%s" title="%s" %s/>' % \
                  (img_location, caption_text, width)
            plotlink = create_html_link(urlbase='%s/%s/%s/plots#%d' %
                                                (CFG_BASE_URL,
                                                 CFG_SITE_RECORD,
                                                 bfo.recID,
                                                 index),
                                        urlargd={},
                                        link_label=img)
            img_files.append((index, plotlink))

    img_files.sort(key=lambda entry: entry[0])
    if max_plots > 0:
        img_files = img_files[:max_plots]

    # NOTE(review): with max_plots <= 0 this comparison is always true, so
    # the "Show more plots" link is always displayed; original behavior kept.
    if len(img_files) >= max_plots:
        link = "<a href='/%s/%s/plots'>%s</a>" % \
               (CFG_SITE_RECORD, bfo.recID, _("Show more plots"))

    plot_links = [plotlink for _index, plotlink in img_files]
    if not plot_links:
        return ''
    return '<div style="overflow-x:auto;display:inline;width:100%;">' + \
           " ".join(plot_links) + ' ' + link + '</div>'
def escape_values(bfo):
    """
    Tell BibFormat whether the output of this element needs HTML escaping.

    @param bfo: BibFormatObject for the record (unused)
    @return: 0, meaning the output must NOT be escaped
    """
    # The element emits raw HTML (<img>, <a>, <div>), so escaping is disabled.
    return 0
| gpl-2.0 |
hashems/Mobile-Cloud-Development-Projects | appengine/standard/xmpp/xmpp_test.py | 9 | 1598 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
import webtest
import xmpp
@pytest.fixture
def app(testbed):
    # WSGI test client wrapping the sample XMPP handlers.  `testbed` is a
    # fixture defined elsewhere; presumably it activates the App Engine
    # service stubs for the duration of each test -- confirm in conftest.
    return webtest.TestApp(xmpp.app)
@mock.patch('xmpp.xmpp')
def test_chat(xmpp_mock, app):
    # Simulate App Engine delivering an inbound XMPP chat message; the
    # xmpp service itself is mocked out.
    app.post('/_ah/xmpp/message/chat/', {
        'from': 'sender@example.com',
        'to': 'recipient@example.com',
        'body': 'hello',
    })
@mock.patch('xmpp.xmpp')
def test_subscribe(xmpp_mock, app):
    # Simulate an inbound XMPP subscription POST from App Engine.
    app.post('/_ah/xmpp/subscribe')
@mock.patch('xmpp.xmpp')
def test_check_presence(xmpp_mock, app):
    # Simulate a presence-available notification for a contact.
    app.post('/_ah/xmpp/presence/available', {
        'from': 'sender@example.com'
    })
@mock.patch('xmpp.xmpp')
def test_send_presence(xmpp_mock, app):
    # Exercise the app's own presence-sending endpoint (not an /_ah hook).
    app.post('/send_presence', {
        'jid': 'node@domain/resource'
    })
@mock.patch('xmpp.xmpp')
def test_error(xmpp_mock, app):
    # Simulate App Engine reporting an XMPP error stanza.
    app.post('/_ah/xmpp/error/', {
        'from': 'sender@example.com',
        'stanza': 'hello world'
    })
@mock.patch('xmpp.xmpp')
def test_send_chat(xmpp_mock, app):
    # Exercise the app's outbound-chat endpoint with the service mocked.
    app.post('/send_chat')
| apache-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/distutils/bcppcompiler.py | 250 | 14941 | """distutils.bcppcompiler
Contains BorlandCCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
__revision__ = "$Id$"
import os
from distutils.errors import (DistutilsExecError, CompileError, LibError,
LinkError, UnknownFileError)
from distutils.ccompiler import CCompiler, gen_preprocess_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
    """Concrete class that implements an interface to the Borland C/C++
    compiler, as defined by the CCompiler abstract class.
    """

    compiler_type = 'bcpp'

    # Just set this so CCompiler's constructor doesn't barf.  We currently
    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
    # as it really isn't necessary for this sort of single-compiler class.
    # Would be nice to have a consistent interface with UnixCCompiler,
    # though, so it's worth thinking about.
    executables = {}

    # Private class data (need to distinguish C from C++ source for compiler)
    _c_extensions = ['.c']
    _cpp_extensions = ['.cc', '.cpp', '.cxx']

    # Needed for the filename generation methods provided by the
    # base class, CCompiler.
    src_extensions = _c_extensions + _cpp_extensions
    obj_extension = '.obj'
    static_lib_extension = '.lib'
    shared_lib_extension = '.dll'
    static_lib_format = shared_lib_format = '%s%s'
    exe_extension = '.exe'

    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):
        """Set up the fixed Borland tool names and default option sets."""
        CCompiler.__init__ (self, verbose, dry_run, force)

        # These executables are assumed to all be in the path.
        # Borland doesn't seem to use any special registry settings to
        # indicate their installation locations.
        self.cc = "bcc32.exe"
        self.linker = "ilink32.exe"
        self.lib = "tlib.exe"

        self.preprocess_options = None
        self.compile_options = ['/tWM', '/O2', '/q', '/g0']
        self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']

        self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
        self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
        self.ldflags_static = []
        self.ldflags_exe = ['/Gn', '/q', '/x']
        self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']

    # -- Worker methods ------------------------------------------------

    def compile(self, sources,
                output_dir=None, macros=None, include_dirs=None, debug=0,
                extra_preargs=None, extra_postargs=None, depends=None):
        """Compile 'sources' into object files and return their names.

        '.rc' resource scripts are compiled with brcc32, '.res' files are
        passed through untouched, and everything else goes to bcc32.
        """
        macros, objects, extra_postargs, pp_opts, build = \
                self._setup_compile(output_dir, macros, include_dirs, sources,
                                    depends, extra_postargs)
        compile_opts = extra_preargs or []
        compile_opts.append ('-c')
        if debug:
            compile_opts.extend (self.compile_options_debug)
        else:
            compile_opts.extend (self.compile_options)

        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                continue
            # XXX why do the normpath here?
            src = os.path.normpath(src)
            obj = os.path.normpath(obj)
            # XXX _setup_compile() did a mkpath() too but before the normpath.
            # Is it possible to skip the normpath?
            self.mkpath(os.path.dirname(obj))

            if ext == '.res':
                # This is already a binary file -- skip it.
                continue # the 'for' loop
            if ext == '.rc':
                # This needs to be compiled to a .res file -- do it now.
                try:
                    self.spawn (["brcc32", "-fo", obj, src])
                except DistutilsExecError, msg:
                    raise CompileError, msg
                continue # the 'for' loop

            # The next two are both for the real compiler.
            if ext in self._c_extensions:
                input_opt = ""
            elif ext in self._cpp_extensions:
                input_opt = "-P"
            else:
                # Unknown file type -- no extra options.  The compiler
                # will probably fail, but let it just in case this is a
                # file the compiler recognizes even if we don't.
                input_opt = ""

            output_opt = "-o" + obj

            # Compiler command line syntax is: "bcc32 [options] file(s)".
            # Note that the source file names must appear at the end of
            # the command line.
            try:
                self.spawn ([self.cc] + compile_opts + pp_opts +
                            [input_opt, output_opt] +
                            extra_postargs + [src])
            except DistutilsExecError, msg:
                raise CompileError, msg

        return objects

    # compile ()

    def create_static_lib (self,
                           objects,
                           output_libname,
                           output_dir=None,
                           debug=0,
                           target_lang=None):
        """Bundle 'objects' into a static library with tlib.

        The '/u' switch updates (or creates) the library in place; the
        link is skipped entirely when the target is already up to date.
        """
        (objects, output_dir) = self._fix_object_args (objects, output_dir)
        output_filename = \
            self.library_filename (output_libname, output_dir=output_dir)

        if self._need_link (objects, output_filename):
            lib_args = [output_filename, '/u'] + objects
            if debug:
                pass                    # XXX what goes here?
            try:
                self.spawn ([self.lib] + lib_args)
            except DistutilsExecError, msg:
                raise LibError, msg
        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    # create_static_lib ()

    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        """Link object files into an executable or DLL with ilink32.

        ilink32 expects comma-delimited groups on its command line:
        objects, output file, map file, libraries, .def file, resources --
        hence the ',' entries appended below.
        """
        # XXX this ignores 'build_temp'!  should follow the lead of
        # msvccompiler.py

        (objects, output_dir) = self._fix_object_args (objects, output_dir)
        (libraries, library_dirs, runtime_library_dirs) = \
            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)

        if runtime_library_dirs:
            log.warn("I don't know what to do with 'runtime_library_dirs': %s",
                     str(runtime_library_dirs))

        if output_dir is not None:
            output_filename = os.path.join (output_dir, output_filename)

        if self._need_link (objects, output_filename):

            # Figure out linker args based on type of target.
            if target_desc == CCompiler.EXECUTABLE:
                # c0w32 / c0d32 are the Borland startup objects for
                # executables and DLLs respectively.
                startup_obj = 'c0w32'
                if debug:
                    ld_args = self.ldflags_exe_debug[:]
                else:
                    ld_args = self.ldflags_exe[:]
            else:
                startup_obj = 'c0d32'
                if debug:
                    ld_args = self.ldflags_shared_debug[:]
                else:
                    ld_args = self.ldflags_shared[:]

            # Create a temporary exports file for use by the linker
            if export_symbols is None:
                def_file = ''
            else:
                head, tail = os.path.split (output_filename)
                modname, ext = os.path.splitext (tail)
                temp_dir = os.path.dirname(objects[0]) # preserve tree structure
                def_file = os.path.join (temp_dir, '%s.def' % modname)
                contents = ['EXPORTS']
                for sym in (export_symbols or []):
                    contents.append('  %s=_%s' % (sym, sym))
                self.execute(write_file, (def_file, contents),
                             "writing %s" % def_file)

            # Borland C++ has problems with '/' in paths
            objects2 = map(os.path.normpath, objects)
            # split objects in .obj and .res files
            # Borland C++ needs them at different positions in the command line
            objects = [startup_obj]
            resources = []
            for file in objects2:
                (base, ext) = os.path.splitext(os.path.normcase(file))
                if ext == '.res':
                    resources.append(file)
                else:
                    objects.append(file)

            for l in library_dirs:
                ld_args.append("/L%s" % os.path.normpath(l))
            ld_args.append("/L.") # we sometimes use relative paths

            # list of object files
            ld_args.extend(objects)

            # XXX the command-line syntax for Borland C++ is a bit wonky;
            # certain filenames are jammed together in one big string, but
            # comma-delimited.  This doesn't mesh too well with the
            # Unix-centric attitude (with a DOS/Windows quoting hack) of
            # 'spawn()', so constructing the argument list is a bit
            # awkward.  Note that doing the obvious thing and jamming all
            # the filenames and commas into one argument would be wrong,
            # because 'spawn()' would quote any filenames with spaces in
            # them.  Arghghh!.  Apparently it works fine as coded...

            # name of dll/exe file
            ld_args.extend([',',output_filename])
            # no map file and start libraries
            ld_args.append(',,')

            for lib in libraries:
                # see if we find it and if there is a bcpp specific lib
                # (xxx_bcpp.lib)
                libfile = self.find_library_file(library_dirs, lib, debug)
                if libfile is None:
                    ld_args.append(lib)
                    # probably a BCPP internal library -- don't warn
                else:
                    # full name which prefers bcpp_xxx.lib over xxx.lib
                    ld_args.append(libfile)

            # some default libraries
            ld_args.append ('import32')
            ld_args.append ('cw32mt')

            # def file for export symbols
            ld_args.extend([',',def_file])
            # add resource files
            ld_args.append(',')
            ld_args.extend(resources)

            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)

            self.mkpath (os.path.dirname (output_filename))
            try:
                self.spawn ([self.linker] + ld_args)
            except DistutilsExecError, msg:
                raise LinkError, msg

        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    # link ()

    # -- Miscellaneous methods -----------------------------------------

    def find_library_file (self, dirs, lib, debug=0):
        """Return the path of the best-matching library file, or None.

        Candidate names are tried in order of preference (see below).
        """
        # List of effective library names to try, in order of preference:
        # xxx_bcpp.lib is better than xxx.lib
        # and xxx_d.lib is better than xxx.lib if debug is set
        #
        # The "_bcpp" suffix is to handle a Python installation for people
        # with multiple compilers (primarily Distutils hackers, I suspect
        # ;-).  The idea is they'd have one static library for each
        # compiler they care about, since (almost?) every Windows compiler
        # seems to have a different format for static libraries.
        if debug:
            dlib = (lib + "_d")
            try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
        else:
            try_names = (lib + "_bcpp", lib)

        for dir in dirs:
            for name in try_names:
                libfile = os.path.join(dir, self.library_filename(name))
                if os.path.exists(libfile):
                    return libfile
        else:
            # Oops, didn't find it in *any* of 'dirs'
            return None

    # overwrite the one from CCompiler to support rc and res-files
    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source file names to the object file names they produce.

        Unlike the base-class version, '.res' files map to themselves and
        '.rc' files map to a '.res' file.
        """
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError, \
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res':
                # these can go unchanged
                obj_names.append (os.path.join (output_dir, base + ext))
            elif ext == '.rc':
                # these need to be compiled to .res-files
                obj_names.append (os.path.join (output_dir, base + '.res'))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()

    def preprocess (self,
                    source,
                    output_file=None,
                    macros=None,
                    include_dirs=None,
                    extra_preargs=None,
                    extra_postargs=None):
        """Run the Borland preprocessor (cpp32.exe) over 'source'."""
        (_, macros, include_dirs) = \
            self._fix_compile_args(None, macros, include_dirs)
        pp_opts = gen_preprocess_options(macros, include_dirs)
        pp_args = ['cpp32.exe'] + pp_opts
        if output_file is not None:
            pp_args.append('-o' + output_file)
        if extra_preargs:
            pp_args[:0] = extra_preargs
        if extra_postargs:
            pp_args.extend(extra_postargs)
        pp_args.append(source)

        # We need to preprocess: either we're being forced to, or the
        # source file is newer than the target (or the target doesn't
        # exist).
        if self.force or output_file is None or newer(source, output_file):
            if output_file:
                self.mkpath(os.path.dirname(output_file))
            try:
                self.spawn(pp_args)
            except DistutilsExecError, msg:
                print msg
                raise CompileError, msg

    # preprocess()
| gpl-2.0 |
chromium/chromium | third_party/blink/web_tests/external/wpt/webdriver/tests/refresh/user_prompts.py | 16 | 4115 | # META: timeout=long
import pytest
from webdriver.error import StaleElementReferenceException
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
def refresh(session):
    # Issue the raw WebDriver "Refresh" command for the current session and
    # return the transport response (no result unwrapping).
    return session.transport.send(
        "POST", "session/{session_id}/refresh".format(**vars(session)))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
    # Helper for the "prompt is auto-closed and the command succeeds" cases
    # (unhandledPromptBehavior: accept / dismiss).
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        session.url = inline("<div id=foo>")
        element = session.find.css("#foo", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = refresh(session)
        assert_success(response)

        # The prompt must have been handled automatically with the
        # expected return value.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        # The refresh went through, so the pre-refresh element reference
        # must now be stale.
        with pytest.raises(StaleElementReferenceException):
            element.property("id")

    return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
    # Helper for the "prompt is auto-closed but the command still fails"
    # cases (unhandledPromptBehavior: ... and notify, and the default).
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        session.url = inline("<div id=foo>")
        element = session.find.css("#foo", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = refresh(session)
        assert_error(response, "unexpected alert open")

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        # The refresh was aborted, so the element reference is still valid.
        assert element.property("id") == "foo"

    return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
    # Helper for unhandledPromptBehavior: ignore -- the prompt stays open,
    # the command fails, and the test cleans the prompt up itself.
    def check_user_prompt_not_closed_but_exception(dialog_type):
        session.url = inline("<div id=foo>")
        element = session.find.css("#foo", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = refresh(session)
        assert_error(response, "unexpected alert open")

        # The prompt was left open; dismiss it manually.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

        # The refresh never happened, so the element reference is valid.
        assert element.property("id") == "foo"

    return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_accept(check_user_prompt_closed_without_exception, dialog_type):
    # "accept": prompt auto-accepted, refresh succeeds.
    # retval not testable for confirm and prompt because window has been reloaded
    check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    # "accept and notify": prompt auto-accepted but the command errors out.
    check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
    # "dismiss": prompt auto-dismissed, refresh succeeds.
    # retval not testable for confirm and prompt because window has been reloaded
    check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    # "dismiss and notify": prompt auto-dismissed but the command errors out.
    check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    # "ignore": prompt left open, command errors out.
    check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    # No capability set: the default behaves like "dismiss and notify".
    check_user_prompt_closed_with_exception(dialog_type, retval)
| bsd-3-clause |
mhugo/QGIS | python/plugins/processing/algs/grass7/ext/v_net_bridge.py | 45 | 1665 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_net_bridge.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Médéric Ribreux'
from .v_net import incorporatePoints, variableOutput
def processCommand(alg, parameters, context, feedback):
    # Delegate to the shared v.net helper, which merges the point layer(s)
    # into the network input before the GRASS command runs.
    incorporatePoints(alg, parameters, context, feedback)
def processOutputs(alg, parameters, context, feedback):
    # Choose the output geometry based on the selected 'method' option:
    # articulation points export as point layers, bridges as line layers.
    idx = alg.parameterAsInt(parameters, 'method', context)
    operations = alg.parameterDefinition('method').options()
    operation = operations[idx]
    if operation == 'articulation':
        # Value layout presumably mirrors v_net.variableOutput's expected
        # [name, geometry, layer, ...] tuple -- confirm against v_net.py.
        outputParameter = {'output': ['output', 'point', 2, True]}
    elif operation == 'bridge':
        outputParameter = {'output': ['output', 'line', 1, False]}
    # NOTE(review): any other option value leaves outputParameter unbound
    # and raises NameError below -- confirm the option list is limited to
    # 'articulation' and 'bridge'.
    variableOutput(alg, outputParameter, parameters, context)
| gpl-2.0 |
rdeheele/odoo | addons/l10n_fr/wizard/__init__.py | 424 | 1462 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import fr_report_bilan
import fr_report_compute_resultant
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
consulo/consulo-python | plugin/src/main/dist/helpers/pydev/_pydev_bundle/_pydev_calltip_util.py | 33 | 4916 | '''
License: Apache 2.0
Author: Yuli Fitterman
'''
# noinspection PyBroadException
import types
from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_PY3K
try:
import inspect
except:
try:
from _pydev_imps import _pydev_inspect as inspect
except:
import traceback;
traceback.print_exc() # Ok, no inspect available (search will not work)from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_PY3K
from _pydev_bundle._pydev_imports_tipper import signature_from_docstring
def is_bound_method(obj):
    """Return True if *obj* is a method object bound to an instance.

    Checks both the Python 3 (``__self__``) and Python 2 (``im_self``)
    attribute names.
    """
    if not isinstance(obj, types.MethodType):
        return False
    bound_to = getattr(obj, '__self__', getattr(obj, 'im_self', None))
    return bound_to is not None
def get_class_name(instance):
    """Return the name of *instance*'s class, or None if it cannot be read."""
    klass = getattr(instance, "__class__", None)
    return getattr(klass, "__name__", None)
def get_bound_class_name(obj):
    """Return the class name of the instance *obj* is bound to, or None.

    Works for both Python 3 (``__self__``) and Python 2 (``im_self``)
    bound methods.
    """
    instance = getattr(obj, '__self__', getattr(obj, 'im_self', None))
    if instance is None:
        return None
    return get_class_name(instance)
def get_description(obj):
    """Build a calltip stub (signature + docstring) for *obj*.

    Handles plain functions, bound methods, classes (via ``__init__``)
    and arbitrary callables; falls back to docstring-only stubs when no
    introspectable signature is available.
    """
    try:
        ob_call = obj.__call__
    except:
        ob_call = None

    # Pick the function object ('fob') whose signature we will inspect:
    # a class's __init__, a callable instance's __call__, or obj itself.
    # 'classobj' is the type name of Python 2 old-style classes.
    if isinstance(obj, type) or type(obj).__name__ == 'classobj':
        fob = getattr(obj, '__init__', lambda: None)
        if not isinstance(fob, (types.FunctionType, types.MethodType)):
            fob = obj
    elif is_bound_method(ob_call):
        fob = ob_call
    else:
        fob = obj

    argspec = ""
    fn_name = None
    fn_class = None
    if isinstance(fob, (types.FunctionType, types.MethodType)):
        spec_info = inspect.getfullargspec(fob) if IS_PY3K else inspect.getargspec(fob)
        argspec = inspect.formatargspec(*spec_info)
        fn_name = getattr(fob, '__name__', None)
        if isinstance(obj, type) or type(obj).__name__ == 'classobj':
            fn_name = "__init__"
            fn_class = getattr(obj, "__name__", "UnknownClass")
        elif is_bound_method(obj) or is_bound_method(ob_call):
            fn_class = get_bound_class_name(obj) or "UnknownClass"
    else:
        # Not an introspectable function (e.g. a builtin): keep the name
        # and, for bound builtins, the owning class.
        fn_name = getattr(fob, '__name__', None)
        fn_self = getattr(fob, '__self__', None)
        if fn_self is not None and not isinstance(fn_self, types.ModuleType):
            fn_class = get_class_name(fn_self)

    doc_string = get_docstring(ob_call) if is_bound_method(ob_call) else get_docstring(obj)
    return create_method_stub(fn_name, fn_class, argspec, doc_string)
def create_method_stub(fn_name, fn_class, argspec, doc_string):
    """Render a Python source stub for a function/method plus a trailing
    expression that names it (used by the IDE to evaluate the calltip).

    Falls back to a signature recovered from the docstring, or to a
    generic ``unknown(*args, **kwargs)`` stub, when no argspec is known.
    """
    if fn_name and argspec:
        doc_string = "" if doc_string is None else doc_string
        fn_stub = create_function_stub(fn_name, argspec, doc_string, indent=1 if fn_class else 0)
        if fn_class:
            # For methods, wrap the stub in a class and reference it via an
            # instantiation expression (or the class itself for __init__).
            expr = fn_class if fn_name == '__init__' else fn_class + '().' + fn_name
            return create_class_stub(fn_class, fn_stub) + "\n" + expr
        else:
            expr = fn_name
            return fn_stub + "\n" + expr
    elif doc_string:
        if fn_name:
            # Try to recover "name(args)" from the first docstring line.
            restored_signature, _ = signature_from_docstring(doc_string, fn_name)
            if restored_signature:
                return create_method_stub(fn_name, fn_class, restored_signature, doc_string)
        return create_function_stub('unknown', '(*args, **kwargs)', doc_string) + '\nunknown'
    else:
        return ''
def get_docstring(obj):
    """Best-effort docstring extraction for *obj*.

    Returns '' for None, the Jython tipper output on Jython, otherwise
    ``inspect.getdoc``; falls back to ``repr``/class name/'' rather than
    ever raising.
    """
    if obj is not None:
        try:
            if IS_JYTHON:
                # Jython
                doc = obj.__doc__
                if doc is not None:
                    return doc

                from _pydev_bundle import _pydev_jy_imports_tipper

                is_method, infos = _pydev_jy_imports_tipper.ismethod(obj)
                ret = ''
                if is_method:
                    for info in infos:
                        ret += info.get_as_doc()
                    return ret
            else:
                doc = inspect.getdoc(obj)
                if doc is not None:
                    return doc
        except:
            # Introspection failure is non-fatal -- fall through to repr().
            pass
    else:
        return ''
    try:
        # if no attempt succeeded, try to return repr()...
        return repr(obj)
    except:
        try:
            # otherwise the class
            return str(obj.__class__)
        except:
            # if all fails, go to an empty string
            return ''
def create_class_stub(class_name, contents):
    """Wrap *contents* (an already-indented body) in a minimal class header."""
    header = "class %s(object):" % (class_name,)
    return header + "\n" + contents
def create_function_stub(fn_name, fn_argspec, fn_docstring, indent=0):
    """Render a ``def`` stub with the given signature and docstring,
    optionally indented *indent* levels (used when nesting inside a class
    stub).
    """
    # Prefix every line of *string* with *prefix* (keeps line endings).
    def shift_right(string, prefix):
        return ''.join(prefix + line for line in string.splitlines(True))

    # Indent the docstring one level deeper than the def itself.
    fn_docstring = shift_right(inspect.cleandoc(fn_docstring), "  " * (indent + 1))
    ret = '''
def %s%s:
    """%s"""
    pass
''' % (fn_name, fn_argspec, fn_docstring)
    ret = ret[1:]  # remove first /n
    ret = ret.replace('\t', "  ")
    if indent:
        prefix = "    " * indent
        ret = shift_right(ret, prefix)
    return ret
| apache-2.0 |
selboo/starl-mangle | webvirtmgr/dashboard/views.py | 1 | 5187 | from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.datastructures import SortedDict
from instance.models import Host
from webvirtmgr.server import ConnServer
from dashboard.forms import HostAddTcpForm, HostAddSshForm
def sort_host(hosts):
    """
    Sorts dictionary of hosts by key
    """
    if hosts:
        sorted_hosts = []
        for host in sorted(hosts.iterkeys()):
            sorted_hosts.append((host, hosts[host]))
        return SortedDict(sorted_hosts)
    # NOTE(review): implicitly returns None for an empty/None mapping --
    # callers pass the result straight into template context; confirm the
    # templates tolerate None where a SortedDict is expected.
def index(request):
    """
    Entry point: route the visitor to the appropriate landing page.

    Authenticated users are sent to the dashboard; anonymous visitors are
    sent to the login form.
    """
    target = '/dashboard' if request.user.is_authenticated() else '/login'
    return HttpResponseRedirect(target)
def dashboard(request):
    """
    Dashboard page.

    Shows reachability of every configured host and handles the POST
    actions for deleting a host ('host_del') and adding one over TCP
    ('host_tcp_add') or SSH ('host_ssh_add').
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')

    def get_hosts_status(hosts):
        """
        Function return all hosts all vds on host
        """
        # Probe each host with a 1-second TCP connect; status is 1 on
        # success or the raised exception object on failure.
        all_hosts = {}
        for host in hosts:
            try:
                import socket
                socket_host = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                socket_host.settimeout(1)
                if host.type == 'ssh':
                    socket_host.connect((host.hostname, host.port))
                else:
                    # 16509 -- presumably the libvirt TCP listen port; confirm.
                    socket_host.connect((host.hostname, 16509))
                socket_host.close()
                status = 1
            except Exception as err:
                status = err
            all_hosts[host.id] = (host.name, host.hostname, status)
        return all_hosts

    hosts = Host.objects.filter()
    hosts_info = get_hosts_status(hosts)
    form = None

    if request.method == 'POST':
        if 'host_del' in request.POST:
            del_host = Host.objects.get(id=request.POST.get('host_id', ''))
            del_host.delete()
            # Redirect back so a reload does not repeat the POST.
            return HttpResponseRedirect(request.get_full_path())
        if 'host_tcp_add' in request.POST:
            form = HostAddTcpForm(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                new_host = Host(name=data['name'],
                                hostname=data['hostname'],
                                type='tcp',
                                login=data['login'],
                                password=data['password1']
                                )
                new_host.save()
                return HttpResponseRedirect(request.get_full_path())
        if 'host_ssh_add' in request.POST:
            form = HostAddSshForm(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                new_host = Host(name=data['name'],
                                hostname=data['hostname'],
                                type='ssh',
                                port=data['port'],
                                login=data['login']
                                )
                new_host.save()
                return HttpResponseRedirect(request.get_full_path())

    hosts_info = sort_host(hosts_info)
    return render_to_response('dashboard.html', {'hosts_info': hosts_info,
                                                 'form': form,
                                                 },
                              context_instance=RequestContext(request))
def infrastructure(request):
    """
    Infrastructure page.

    For every reachable host, connects via ConnServer and collects node
    info, memory usage and the VMs running on it; unreachable hosts are
    listed with status 2 and no data.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')

    hosts = Host.objects.filter().order_by('id')
    hosts_vms = {}
    host_info = None
    host_mem = None

    for host in hosts:
        try:
            import socket
            socket_host = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            socket_host.settimeout(1)
            if host.type == 'ssh':
                socket_host.connect((host.hostname, host.port))
            else:
                # 16509 -- presumably the libvirt TCP listen port; confirm.
                socket_host.connect((host.hostname, 16509))
            socket_host.close()
            status = 1
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt --
            # probably meant to catch socket errors only.
            status = 2
        if status == 1:
            conn = ConnServer(host)
            host_info = conn.node_get_info()
            host_mem = conn.memory_get_usage()
            # Tuple key: (id, name, status, info, total mem, used mem) --
            # unpacked by the template.
            hosts_vms[host.id, host.name, status, host_info[2], host_mem[0], host_mem[2]] = conn.vds_on_cluster()
        else:
            hosts_vms[host.id, host.name, status, None, None, None] = None

    # Sort each host's VM mapping, then the hosts themselves (keys are
    # unchanged, so mutating values during iteration is safe here).
    for host in hosts_vms:
        hosts_vms[host] = sort_host(hosts_vms[host])
    hosts_vms = sort_host(hosts_vms)

    return render_to_response('infrastructure.html', {'hosts_info': host_info,
                                                      'host_mem': host_mem,
                                                      'hosts_vms': hosts_vms,
                                                      'hosts': hosts
                                                      },
                              context_instance=RequestContext(request))
def page_setup(request):
    # Render the static first-run setup page (no authentication check).
    return render_to_response('setup.html', {}, context_instance=RequestContext(request))
| apache-2.0 |
andreparrish/python-for-android | python-modules/twisted/twisted/test/test_text.py | 49 | 5450 |
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.python import text
import string
from cStringIO import StringIO
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
lineWidth = 72
def set_lineWidth(n):
    # Mutates the module-level wrap width used by WrapTest.setUp.
    global lineWidth
    lineWidth = n
class WrapTest(unittest.TestCase):
    """
    Tests for L{text.wordWrap}: wrapping must preserve every word and
    keep each output line within the module-level C{lineWidth}.
    """
    def setUp(self):
        self.sampleSplitText = string.split(sampleText)
        self.output = text.wordWrap(sampleText, lineWidth)

    def test_wordCount(self):
        """Compare the number of words."""
        words = []
        for line in self.output:
            words.extend(string.split(line))
        wordCount = len(words)
        sampleTextWordCount = len(self.sampleSplitText)
        self.failUnlessEqual(wordCount, sampleTextWordCount)

    def test_wordMatch(self):
        """Compare the lists of words."""
        words = []
        for line in self.output:
            words.extend(string.split(line))
        # Using failUnlessEqual here prints out some
        # rather too long lists.
        self.failUnless(self.sampleSplitText == words)

    def test_lineLength(self):
        """Check the length of the lines."""
        failures = []
        for line in self.output:
            if not len(line) <= lineWidth:
                failures.append(len(line))
        if failures:
            self.fail("%d of %d lines were too long.\n"
                      "%d < %s" % (len(failures), len(self.output),
                                   lineWidth, failures))
class SplitTest(unittest.TestCase):
    """Tests for text.splitQuoted()"""

    def test_oneWord(self):
        """Splitting strings with one-word phrases."""
        s = 'This code "works."'
        r = text.splitQuoted(s)
        self.failUnlessEqual(['This', 'code', 'works.'], r)

    def test_multiWord(self):
        s = 'The "hairy monkey" likes pie.'
        r = text.splitQuoted(s)
        self.failUnlessEqual(['The', 'hairy monkey', 'likes', 'pie.'], r)

    # Some of the many tests that would fail:

    #def test_preserveWhitespace(self):
    #    phrase = '"MANY SPACES"'
    #    s = 'With %s between.' % (phrase,)
    #    r = text.splitQuoted(s)
    #    self.failUnlessEqual(['With', phrase, 'between.'], r)

    #def test_escapedSpace(self):
    #    s = r"One\ Phrase"
    #    r = text.splitQuoted(s)
    #    self.failUnlessEqual(["One Phrase"], r)
class StrFileTest(unittest.TestCase):
    """
    Tests for L{text.strFile}: substring search within a file-like object,
    for 1-char, 3-char and longer needles, plus case-insensitive matching.
    """
    def setUp(self):
        self.io = StringIO("this is a test string")

    def tearDown(self):
        pass

    def test_1_f(self):
        self.assertEquals(False, text.strFile("x", self.io))

    def test_1_1(self):
        self.assertEquals(True, text.strFile("t", self.io))

    def test_1_2(self):
        self.assertEquals(True, text.strFile("h", self.io))

    def test_1_3(self):
        self.assertEquals(True, text.strFile("i", self.io))

    def test_1_4(self):
        self.assertEquals(True, text.strFile("s", self.io))

    def test_1_5(self):
        self.assertEquals(True, text.strFile("n", self.io))

    def test_1_6(self):
        self.assertEquals(True, text.strFile("g", self.io))

    def test_3_1(self):
        self.assertEquals(True, text.strFile("thi", self.io))

    def test_3_2(self):
        self.assertEquals(True, text.strFile("his", self.io))

    def test_3_3(self):
        self.assertEquals(True, text.strFile("is ", self.io))

    def test_3_4(self):
        self.assertEquals(True, text.strFile("ing", self.io))

    def test_3_f(self):
        self.assertEquals(False, text.strFile("bla", self.io))

    def test_large_1(self):
        self.assertEquals(True, text.strFile("this is a test", self.io))

    def test_large_2(self):
        self.assertEquals(True, text.strFile("is a test string", self.io))

    def test_large_f(self):
        self.assertEquals(False, text.strFile("ds jhfsa k fdas", self.io))

    def test_overlarge_f(self):
        # Needle longer than the file contents can never match.
        self.assertEquals(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io))

    def test_self(self):
        self.assertEquals(True, text.strFile("this is a test string", self.io))

    def test_insensitive(self):
        # Third argument False selects case-insensitive matching.
        self.assertEquals(True, text.strFile("ThIs is A test STRING", self.io, False))
class DeprecationTest(unittest.TestCase):
    """
    Tests for deprecations in L{twisted.python.text}
    """
    def test_docstringLStrip(self):
        """
        L{docstringLStrip} is deprecated as of 10.2.0
        """
        # Calling the function must emit exactly one DeprecationWarning
        # with the documented replacement message.
        text.docstringLStrip("")
        warningsShown = self.flushWarnings([self.test_docstringLStrip])
        self.assertEquals(1, len(warningsShown))
        self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
        self.assertEquals(warningsShown[0]['message'],
                          "twisted.python.text.docstringLStrip was "
                          "deprecated in Twisted 10.2.0: Please use "
                          "inspect.getdoc instead.")
# Explicit suite list used by trial's test discovery.
# NOTE(review): DeprecationTest is not included here -- confirm whether that
# omission is intentional (trial may also discover TestCase subclasses by name).
testCases = [WrapTest, SplitTest, StrFileTest]
| apache-2.0 |
javiplx/debian-devel | cobbler/action_replicate.py | 4 | 8848 | """
Replicate from a cobbler master.
Copyright 2007-2008, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
Scott Henson <shenson@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import os.path
import xmlrpclib
import api as cobbler_api
import utils
from utils import _
from cexceptions import *
import sub_process
class Replicate:
def __init__(self,config):
"""
Constructor
"""
self.config = config
self.settings = config.settings()
self.api = config.api
self.remote = None
self.uri = None
# -------------------------------------------------------
def link_distro(self, distro):
"""
Create the distro links
"""
# find the tree location
dirname = os.path.dirname(distro.kernel)
tokens = dirname.split("/")
tokens = tokens[:-2]
base = "/".join(tokens)
dest_link = os.path.join(self.settings.webdir, "links", distro.name)
# create the links directory only if we are mirroring because with
# SELinux Apache can't symlink to NFS (without some doing)
# be sure not to create broken symlinks, base must exist, use --rync-trees to mirror
if not os.path.exists(dest_link) and os.path.exists(base):
try:
os.symlink(base, dest_link)
except:
# this shouldn't happen but I've seen it ... debug ...
print _("- symlink creation failed: %(base)s, %(dest)s") % { "base" : base, "dest" : dest_link }
# -------------------------------------------------------
def rsync_it(self,from_path,to_path):
from_path = "%s:%s" % (self.host, from_path)
cmd = "rsync -avz %s %s" % (from_path, to_path)
print _("- %s") % cmd
rc = sub_process.call(cmd, shell=True, close_fds=True)
if rc !=0:
raise CX(_("rsync failed"))
def scp_it(self,from_path,to_path):
from_path = "%s:%s" % (self.host, from_path)
cmd = "scp %s %s" % (from_path, to_path)
print _("- %s") % cmd
rc = sub_process.call(cmd, shell=True, close_fds=True)
if rc !=0:
raise CX(_("scp failed"))
# -------------------------------------------------------
def replicate_data(self):
# distros
print _("----- Copying Distros")
local_distros = self.api.distros()
try:
remote_distros = self.remote.get_distros()
except:
raise CX(_("Failed to contact remote server"))
if self.sync_all or self.sync_trees:
print _("----- Rsyncing Distribution Trees")
self.rsync_it(os.path.join(self.settings.webdir,"ks_mirror"),self.settings.webdir)
for distro in remote_distros:
print _("Importing remote distro %s.") % distro['name']
if os.path.exists(distro['kernel']):
new_distro = self.api.new_distro()
new_distro.from_datastruct(distro)
self.link_distro(new_distro)
try:
self.api.add_distro(new_distro)
print _("Copied distro %s.") % distro['name']
except Exception, e:
utils.print_exc(e)
print _("Failed to copy distro %s") % distro['name']
else:
print _("Failed to copy distro %s, content not here yet.") % distro['name']
if self.sync_all or self.sync_repos:
print _("----- Rsyncing Package Mirrors")
self.rsync_it(os.path.join(self.settings.webdir,"repo_mirror"),self.settings.webdir)
if self.sync_all or self.sync_kickstarts:
print _("----- Rsyncing kickstart templates & snippets")
self.scp_it("/etc/cobbler/*.ks","/etc/cobbler")
self.rsync_it("/var/lib/cobbler/kickstarts","/var/lib/cobbler")
self.rsync_it("/var/lib/cobbler/snippets","/var/lib/cobbler")
# repos
# FIXME: check to see if local mirror is here, or if otherwise accessible
print _("----- Copying Repos")
local_repos = self.api.repos()
remote_repos = self.remote.get_repos()
for repo in remote_repos:
print _("Importing remote repo %s.") % repo['name']
new_repo = self.api.new_repo()
new_repo.from_datastruct(repo)
try:
self.api.add_repo(new_repo)
print _("Copied repo %s.") % repo['name']
except Exception, e:
utils.print_exc(e)
print _("Failed to copy repo %s.") % repo['name']
# profiles
print _("----- Copying Profiles")
local_profiles = self.api.profiles()
remote_profiles = self.remote.get_profiles()
# workaround for profile inheritance, must load in order
def __depth_sort(a,b):
return cmp(a["depth"],b["depth"])
remote_profiles.sort(__depth_sort)
for profile in remote_profiles:
print _("Importing remote profile %s" % profile['name'])
new_profile = self.api.new_profile()
new_profile.from_datastruct(profile)
try:
self.api.add_profile(new_profile)
print _("Copyied profile %s.") % profile['name']
except Exception, e:
utils.print_exc(e)
print _("Failed to copy profile %s.") % profile['name']
# images
print _("----- Copying Images")
remote_images = self.remote.get_images()
for image in remote_images:
print _("Importing remote image %s" % image['name'])
new_image = self.api.new_image()
new_image.from_datastruct(image)
try:
self.api.add_image(new_image)
print _("Copyied image %s.") % image['name']
except Exception, e:
utils.print_exc(e)
print _("Failed to copy image %s.") % profile['image']
# systems
# (optional)
if self.include_systems:
print _("----- Copying Systems")
local_systems = self.api.systems()
remote_systems = self.remote.get_systems()
for system in remote_systems:
print _("Importing remote system %s" % system['name'])
new_system = self.api.new_system()
new_system.from_datastruct(system)
try:
self.api.add_system(new_system)
print _("Copied system %s.") % system['name']
except Exception, e:
utils.print_exc(e)
print _("Failed to copy system %s") % system['name']
if self.sync_all or self.sync_triggers:
print _("----- Rsyncing triggers")
self.rsync_it("/var/lib/cobbler/triggers","/var/lib/cobbler")
# -------------------------------------------------------
def run(self, cobbler_master=None, sync_all=False, sync_kickstarts=False,
sync_trees=False, sync_repos=False, sync_triggers=False, include_systems=False):
"""
Get remote profiles and distros and sync them locally
"""
self.sync_all = sync_all
self.sync_kickstarts = sync_kickstarts
self.sync_trees = sync_trees
self.sync_repos = sync_repos
self.sync_triggers = sync_triggers
self.include_systems = include_systems
if cobbler_master is not None:
self.host = cobbler_master
self.uri = 'http://%s/cobbler_api' % cobbler_master
elif len(self.settings.cobbler_master) > 0:
self.host = self.settings.cobbler_master
self.uri = 'http://%s/cobbler_api' % self.settings.cobbler_master
else:
raise CX(_('No cobbler master specified, try --master.'))
print _("XMLRPC endpoint: %s") % self.uri
self.remote = xmlrpclib.Server(self.uri)
self.replicate_data()
print _("----- Syncing")
self.api.sync()
print _("----- Done")
| gpl-2.0 |
ychen820/microblog | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
Win1255HebrewModel = {
'charToOrderMap': win1255_CharToOrderMap,
'precedenceMatrix': HebrewLangModel,
'mTypicalPositiveRatio': 0.984004,
'keepEnglishLetter': False,
'charsetName': "windows-1255"
}
# flake8: noqa
| bsd-3-clause |
edx/edx-platform | common/lib/xmodule/xmodule/tests/test_conditional.py | 3 | 19144 | # lint-amnesty, pylint: disable=missing-module-docstring
import json
import unittest
from unittest.mock import Mock, patch
from fs.memoryfs import MemoryFS
from lxml import etree
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from web_fragments.fragment import Fragment
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.conditional_module import ConditionalBlock
from xmodule.error_module import NonStaffErrorBlock
from xmodule.modulestore.xml import CourseLocationManager, ImportSystem, XMLModuleStore
from xmodule.tests import DATA_DIR, get_test_descriptor_system, get_test_system
from xmodule.tests.xml import XModuleXmlImportTest
from xmodule.tests.xml import factories as xml
from xmodule.validation import StudioValidationMessage
from xmodule.x_module import AUTHOR_VIEW, STUDENT_VIEW
# Test fixture constants: course org and the data directory name holding the
# sample "conditional" course used by the XML import tests below.
ORG = 'test_org'
COURSE = 'conditional'  # name of directory with course data
class DummySystem(ImportSystem):  # lint-amnesty, pylint: disable=abstract-method
    """
    A minimal ImportSystem backed by an in-memory filesystem, suitable
    for exercising conditional-module imports without touching disk.
    """

    @patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
    def __init__(self, load_error_modules):
        store = XMLModuleStore("data_dir", source_dirs=[], load_error_modules=load_error_modules)
        course_key = CourseKey.from_string('/'.join([ORG, COURSE, 'test_run']))
        super().__init__(
            xmlstore=store,
            course_id=course_key,
            course_dir='test_dir',
            error_tracker=Mock(),
            load_error_modules=load_error_modules,
        )

    def render_template(self, template, context):  # lint-amnesty, pylint: disable=method-hidden
        """These tests never render templates; fail loudly if one is requested."""
        raise Exception("Shouldn't be called")
class ConditionalBlockFactory(xml.XmlImportFactory):
    """
    Factory for generating ConditionalBlock for testing purposes
    """
    # XML element name this factory emits / the importer parses.
    tag = 'conditional'
class ConditionalFactory:
    """
    A helper class to create a conditional module and associated source and child modules
    to allow for testing.
    """
    @staticmethod
    def create(system, source_is_error_module=False, source_visible_to_staff_only=False):
        """
        return a dict of modules: the conditional with a single source and a single child.
        Keys are 'cond_module', 'source_module', and 'child_module'.

        if the source_is_error_module flag is set, create a real ErrorBlock for the source.
        """
        descriptor_system = get_test_descriptor_system()

        # construct source descriptor and module:
        source_location = BlockUsageLocator(CourseLocator("edX", "conditional_test", "test_run", deprecated=True),
                                            "problem", "SampleProblem", deprecated=True)
        if source_is_error_module:
            # Make an error descriptor and module
            source_descriptor = NonStaffErrorBlock.from_xml(
                'some random xml data',
                system,
                id_generator=CourseLocationManager(source_location.course_key),
                error_msg='random error message'
            )
        else:
            source_descriptor = Mock(name='source_descriptor')
            source_descriptor.location = source_location

        # common wiring regardless of which kind of source was built above
        source_descriptor.visible_to_staff_only = source_visible_to_staff_only
        source_descriptor.runtime = descriptor_system
        source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)

        # construct other descriptors:
        child_descriptor = Mock(name='child_descriptor')
        child_descriptor.visible_to_staff_only = False
        child_descriptor._xmodule.student_view.return_value = Fragment(content='<p>This is a secret</p>')  # lint-amnesty, pylint: disable=protected-access
        child_descriptor.student_view = child_descriptor._xmodule.student_view  # lint-amnesty, pylint: disable=protected-access
        child_descriptor.displayable_items.return_value = [child_descriptor]
        child_descriptor.runtime = descriptor_system
        child_descriptor.xmodule_runtime = get_test_system()
        child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
        child_descriptor.location = source_location.replace(category='html', name='child')

        def visible_to_nonstaff_users(desc):
            """
            Returns if the object is visible to nonstaff users.
            """
            return not desc.visible_to_staff_only

        def load_item(usage_id, for_parent=None):  # pylint: disable=unused-argument
            """Test-only implementation of load_item that simply returns static xblocks."""
            return {
                child_descriptor.location: child_descriptor,
                source_location: source_descriptor
            }.get(usage_id)

        descriptor_system.load_item = load_item

        system.descriptor_runtime = descriptor_system

        # construct conditional module:
        cond_location = BlockUsageLocator(CourseLocator("edX", "conditional_test", "test_run", deprecated=True),
                                          "conditional", "SampleConditional", deprecated=True)
        field_data = DictFieldData({
            'data': '<conditional/>',
            'conditional_attr': 'attempted',
            'conditional_value': 'true',
            'xml_attributes': {'attempted': 'true'},
            'children': [child_descriptor.location],
        })

        cond_descriptor = ConditionalBlock(
            descriptor_system,
            field_data,
            ScopeIds(None, None, cond_location, cond_location)
        )
        cond_descriptor.xmodule_runtime = system
        # get_module returns None for staff-only blocks (mimics access control)
        system.get_module = lambda desc: desc if visible_to_nonstaff_users(desc) else None
        cond_descriptor.get_required_blocks = [
            system.get_module(source_descriptor),
        ]

        # return dict:
        return {'cond_module': cond_descriptor,
                'source_module': source_descriptor,
                'child_module': child_descriptor}
class ConditionalBlockBasicTest(unittest.TestCase):
    """
    Make sure that conditional module works, using mocks for
    other modules.
    """

    def setUp(self):
        super().setUp()
        self.test_system = get_test_system()

    def test_icon_class(self):
        '''verify that get_icon_class works independent of condition satisfaction'''
        modules = ConditionalFactory.create(self.test_system)
        for attempted in ["false", "true"]:
            for icon_class in ['other', 'problem', 'video']:
                modules['source_module'].is_attempted = attempted
                modules['child_module'].get_icon_class = lambda: icon_class  # lint-amnesty, pylint: disable=cell-var-from-loop
                assert modules['cond_module'].get_icon_class() == icon_class

    def test_get_html(self):
        """The student view renders the conditional_ajax template with the expected context."""
        modules = ConditionalFactory.create(self.test_system)
        # because get_test_system returns the repr of the context dict passed to render_template,
        # we reverse it here
        html = modules['cond_module'].render(STUDENT_VIEW).content
        expected = modules['cond_module'].xmodule_runtime.render_template('conditional_ajax.html', {
            'ajax_url': modules['cond_module'].ajax_url,
            'element_id': 'i4x-edX-conditional_test-conditional-SampleConditional',
            'depends': 'i4x-edX-conditional_test-problem-SampleProblem',
        })
        assert expected == html

    def test_handle_ajax(self):
        """Child content is withheld until the source condition is satisfied."""
        modules = ConditionalFactory.create(self.test_system)
        modules['cond_module'].save()
        modules['source_module'].is_attempted = "false"
        ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
        print("ajax: ", ajax)
        fragments = ajax['fragments']
        assert not any(('This is a secret' in item['content']) for item in fragments)

        # now change state of the capa problem to make it completed
        modules['source_module'].is_attempted = "true"
        ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
        modules['cond_module'].save()
        print("post-attempt ajax: ", ajax)
        fragments = ajax['fragments']
        assert any(('This is a secret' in item['content']) for item in fragments)

    def test_error_as_source(self):
        '''
        Check that handle_ajax works properly if the source is really an ErrorBlock,
        and that the condition is not satisfied.
        '''
        modules = ConditionalFactory.create(self.test_system, source_is_error_module=True)
        modules['cond_module'].save()
        ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
        fragments = ajax['fragments']
        assert not any(('This is a secret' in item['content']) for item in fragments)

    @patch('xmodule.conditional_module.log')
    def test_conditional_with_staff_only_source_module(self, mock_log):
        """A staff-only source yields a None required block without logging a warning."""
        modules = ConditionalFactory.create(
            self.test_system,
            source_visible_to_staff_only=True,
        )
        cond_module = modules['cond_module']
        cond_module.save()
        cond_module.is_attempted = "false"
        cond_module.handle_ajax('', '')
        assert not mock_log.warn.called
        # get_module returned None for the staff-only source (see ConditionalFactory)
        assert None in cond_module.get_required_blocks
class ConditionalBlockXmlTest(unittest.TestCase):
    """
    Make sure ConditionalBlock works, by loading data in from an XML-defined course.
    """
    @staticmethod
    def get_system(load_error_modules=True):
        '''Get a dummy system'''
        return DummySystem(load_error_modules)

    def setUp(self):
        super().setUp()
        self.test_system = get_test_system()

    def get_course(self, name):
        """Get a test course by directory name. If there's more than one, error."""
        print(f"Importing {name}")
        modulestore = XMLModuleStore(DATA_DIR, source_dirs=[name])
        courses = modulestore.get_courses()
        self.modulestore = modulestore  # lint-amnesty, pylint: disable=attribute-defined-outside-init
        assert len(courses) == 1
        return courses[0]

    @patch('xmodule.x_module.descriptor_global_local_resource_url')
    def test_conditional_module(self, _):
        """Make sure that conditional module works"""
        print("Starting import")
        course = self.get_course('conditional_and_poll')
        print("Course: ", course)
        print("id: ", course.id)

        def inner_get_module(descriptor):
            # Resolve a locator to its descriptor and wire a fresh test
            # runtime onto it so render()/handle_ajax() work.
            if isinstance(descriptor, BlockUsageLocator):
                location = descriptor
                descriptor = self.modulestore.get_item(location, depth=None)
            descriptor.xmodule_runtime = get_test_system()
            descriptor.xmodule_runtime.descriptor_runtime = descriptor._runtime  # pylint: disable=protected-access
            descriptor.xmodule_runtime.get_module = inner_get_module
            return descriptor

        # edx - HarvardX
        # cond_test - ER22x
        location = BlockUsageLocator(CourseLocator("HarvardX", "ER22x", "2013_Spring", deprecated=True),
                                     "conditional", "condone", deprecated=True)

        def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None):  # lint-amnesty, pylint: disable=unused-argument
            # Identity stub: URL rewriting is irrelevant for this test.
            return text
        self.test_system.replace_urls = replace_urls
        self.test_system.get_module = inner_get_module

        module = inner_get_module(location)
        print("module: ", module)
        print("module children: ", module.get_children())
        print("module display items (children): ", module.get_display_items())

        html = module.render(STUDENT_VIEW).content
        print("html type: ", type(html))
        print("html: ", html)
        html_expect = module.xmodule_runtime.render_template(
            'conditional_ajax.html',
            {
                # Test ajax url is just usage-id / handler_name
                'ajax_url': f'{str(location)}/xmodule_handler',
                'element_id': 'i4x-HarvardX-ER22x-conditional-condone',
                'depends': 'i4x-HarvardX-ER22x-problem-choiceprob'
            }
        )
        assert html == html_expect

        gdi = module.get_display_items()
        print("gdi=", gdi)

        ajax = json.loads(module.handle_ajax('', ''))
        module.save()
        print("ajax: ", ajax)
        fragments = ajax['fragments']
        assert not any(('This is a secret' in item['content']) for item in fragments)

        # Now change state of the capa problem to make it completed
        inner_module = inner_get_module(location.replace(category="problem", name='choiceprob'))
        inner_module.attempts = 1
        # Save our modifications to the underlying KeyValueStore so they can be persisted
        inner_module.save()

        ajax = json.loads(module.handle_ajax('', ''))
        module.save()
        print("post-attempt ajax: ", ajax)
        fragments = ajax['fragments']
        assert any(('This is a secret' in item['content']) for item in fragments)

    # NOTE(review): disables unittest's assertion-diff truncation for this
    # TestCase; it appears mid-class in the original source -- confirm it is
    # intended as a class attribute.
    maxDiff = None

    def test_conditional_module_with_empty_sources_list(self):
        """
        If a ConditionalBlock is initialized with an empty sources_list, we assert that the sources_list is set
        via generating UsageKeys from the values in xml_attributes['sources']
        """
        dummy_system = Mock()
        dummy_location = BlockUsageLocator(CourseLocator("edX", "conditional_test", "test_run"),
                                           "conditional", "SampleConditional")
        dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
        dummy_field_data = DictFieldData({
            'data': '<conditional/>',
            'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll'},
            'children': None,
        })
        conditional = ConditionalBlock(
            dummy_system,
            dummy_field_data,
            dummy_scope_ids,
        )
        new_run = conditional.location.course_key.run  # lint-amnesty, pylint: disable=unused-variable
        assert conditional.sources_list[0] == BlockUsageLocator.from_string(conditional.xml_attributes['sources'])\
            .replace(run=dummy_location.course_key.run)

    def test_conditional_module_parse_sources(self):
        """parse_sources splits the semicolon-separated 'sources' attribute."""
        dummy_system = Mock()
        dummy_location = BlockUsageLocator(CourseLocator("edX", "conditional_test", "test_run"),
                                           "conditional", "SampleConditional")
        dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
        dummy_field_data = DictFieldData({
            'data': '<conditional/>',
            'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll;i4x://HarvardX/ER22x/poll_question/T16_poll'},  # lint-amnesty, pylint: disable=line-too-long
            'children': None,
        })
        conditional = ConditionalBlock(
            dummy_system,
            dummy_field_data,
            dummy_scope_ids,
        )
        assert conditional.parse_sources(conditional.xml_attributes) == ['i4x://HarvardX/ER22x/poll_question/T15_poll',
                                                                         'i4x://HarvardX/ER22x/poll_question/T16_poll']

    def test_conditional_module_parse_attr_values(self):
        """definition_from_xml extracts the condition attribute/value pair from the XML element."""
        root = '<conditional attempted="false"></conditional>'
        xml_object = etree.XML(root)
        definition = ConditionalBlock.definition_from_xml(xml_object, Mock())[0]
        expected_definition = {
            'show_tag_list': [],
            'conditional_attr': 'attempted',
            'conditional_value': 'false',
            'conditional_message': ''
        }
        assert definition == expected_definition

    def test_presence_attributes_in_xml_attributes(self):
        """definition_to_xml leaves the condition-related keys present in xml_attributes."""
        modules = ConditionalFactory.create(self.test_system)
        modules['cond_module'].save()
        modules['cond_module'].definition_to_xml(Mock())
        expected_xml_attributes = {
            'attempted': 'true',
            'message': 'You must complete {link} before you can access this unit.',
            'sources': ''
        }
        self.assertDictEqual(modules['cond_module'].xml_attributes, expected_xml_attributes)
class ConditionalBlockStudioTest(XModuleXmlImportTest):
    """
    Unit tests for how conditional test interacts with Studio.
    """
    def setUp(self):
        """Build course > sequence > conditional > hidden HTML child, then bind a student."""
        super().setUp()
        course = xml.CourseFactory.build()
        sequence = xml.SequenceFactory.build(parent=course)
        conditional = ConditionalBlockFactory(
            parent=sequence,
            attribs={
                'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/conditional_0"}'
            }
        )
        # The child that must only be visible when the conditional itself is rendered.
        xml.HtmlFactory(parent=conditional, url_name='conditional_0', text='This is a secret HTML')
        self.course = self.process_xml(course)
        self.sequence = self.course.get_children()[0]
        self.conditional = self.sequence.get_children()[0]
        self.module_system = get_test_system()
        self.module_system.descriptor_runtime = self.course._runtime  # pylint: disable=protected-access
        # Bind to a non-staff student so rendering uses student-facing code paths.
        user = Mock(username='ma', email='ma@edx.org', is_staff=False, is_active=True)
        self.conditional.bind_for_student(
            self.module_system,
            user.id
        )
    def test_render_author_view(self,):
        """
        Test the rendering of the Studio author view.
        """
        def create_studio_context(root_xblock, is_unit_page):
            """
            Context for rendering the studio "author_view".
            """
            return {
                'reorderable_items': set(),
                'root_xblock': root_xblock,
                'is_unit_page': is_unit_page
            }
        # When the conditional is the root xblock its children are rendered.
        context = create_studio_context(self.conditional, False)
        html = self.module_system.render(self.conditional, AUTHOR_VIEW, context).content
        assert 'This is a secret HTML' in html
        # On a unit page (root is the parent sequence) the hidden child must not leak.
        context = create_studio_context(self.sequence, True)
        html = self.module_system.render(self.conditional, AUTHOR_VIEW, context).content
        assert 'This is a secret HTML' not in html
    def test_non_editable_settings(self):
        """
        Test the settings that are marked as "non-editable".
        """
        non_editable_metadata_fields = self.conditional.non_editable_metadata_fields
        assert ConditionalBlock.due in non_editable_metadata_fields
    def test_validation_messages(self):
        """
        Test the validation message for a correctly configured conditional.
        """
        # With no sources configured, validation should point the author at setup.
        self.conditional.sources_list = None
        validation = self.conditional.validate()
        assert validation.summary.text == 'This component has no source components configured yet.'
        assert validation.summary.type == StudioValidationMessage.NOT_CONFIGURED
        assert validation.summary.action_class == 'edit-button'
        assert validation.summary.action_label == 'Configure list of sources'
| agpl-3.0 |
helldorado/ansible | lib/ansible/modules/storage/zfs/zfs.py | 33 | 7963 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- Manages ZFS file systems, volumes, clones and snapshots
version_added: "1.1"
options:
name:
description:
- File system, snapshot or volume name e.g. C(rpool/myfs).
required: true
state:
description:
- Whether to create (C(present)), or remove (C(absent)) a
file system, snapshot or volume. All parents/children
will be created/destroyed as needed to reach the desired state.
choices: [ absent, present ]
required: true
origin:
description:
- Snapshot from which to create a clone.
extra_zfs_properties:
description:
- A dictionary of zfs properties to be set.
- See the zfs(8) man page for more information.
version_added: "2.5"
author:
- Johan Wiren (@johanwiren)
'''
EXAMPLES = '''
- name: Create a new file system called myfs in pool rpool with the setuid property turned off
zfs:
name: rpool/myfs
state: present
extra_zfs_properties:
setuid: off
- name: Create a new volume called myvol in pool rpool.
zfs:
name: rpool/myvol
state: present
extra_zfs_properties:
volsize: 10M
- name: Create a snapshot of rpool/myfs file system.
zfs:
name: rpool/myfs@mysnapshot
state: present
- name: Create a new file system called myfs2 with snapdir enabled
zfs:
name: rpool/myfs2
state: present
extra_zfs_properties:
snapdir: enabled
- name: Create a new file system by cloning a snapshot
zfs:
name: rpool/cloned_fs
state: present
origin: rpool/myfs@mysnapshot
- name: Destroy a filesystem
zfs:
name: rpool/myfs
state: absent
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Zfs(object):
    """Thin wrapper around the ``zfs``/``zpool`` command-line tools for one dataset.

    Tracks whether any state-changing command actually ran in ``self.changed``
    so the module can report idempotent results back to Ansible.
    """
    def __init__(self, module, name, properties):
        self.module = module
        self.name = name
        self.properties = properties
        self.changed = False
        # required=True makes get_bin_path fail the module if the binary is missing.
        self.zfs_cmd = module.get_bin_path('zfs', True)
        self.zpool_cmd = module.get_bin_path('zpool', True)
        # The pool is everything before the first '/' of the dataset name.
        self.pool = name.split('/')[0]
        self.is_solaris = os.uname()[0] == 'SunOS'
        self.is_openzfs = self.check_openzfs()
        self.enhanced_sharing = self.check_enhanced_sharing()
    def check_openzfs(self):
        """Return True when the pool reports an OpenZFS version ('-' or 5000)."""
        cmd = [self.zpool_cmd]
        cmd.extend(['get', 'version'])
        cmd.append(self.pool)
        (rc, out, err) = self.module.run_command(cmd, check_rc=True)
        version = out.splitlines()[-1].split()[2]
        if version == '-':
            return True
        if int(version) == 5000:
            return True
        return False
    def check_enhanced_sharing(self):
        """Return True on Solaris (non-OpenZFS) pools at version >= 34 (share.* props)."""
        if self.is_solaris and not self.is_openzfs:
            cmd = [self.zpool_cmd]
            cmd.extend(['get', 'version'])
            cmd.append(self.pool)
            (rc, out, err) = self.module.run_command(cmd, check_rc=True)
            version = out.splitlines()[-1].split()[2]
            if int(version) >= 34:
                return True
        return False
    def exists(self):
        """Return True if the dataset (filesystem, volume or snapshot) exists."""
        cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            return True
        else:
            return False
    def create(self):
        """Create the filesystem, volume, snapshot or clone; honours check mode."""
        if self.module.check_mode:
            self.changed = True
            return
        properties = self.properties
        origin = self.module.params.get('origin', None)
        cmd = [self.zfs_cmd]
        # '@' in the name means a snapshot; an origin means a clone; otherwise create.
        if "@" in self.name:
            action = 'snapshot'
        elif origin:
            action = 'clone'
        else:
            action = 'create'
        cmd.append(action)
        if action in ['create', 'clone']:
            cmd += ['-p']  # create any missing parent datasets
        if properties:
            for prop, value in properties.items():
                # volsize/volblocksize have dedicated flags; everything else is -o key="value".
                if prop == 'volsize':
                    cmd += ['-V', value]
                elif prop == 'volblocksize':
                    cmd += ['-b', value]
                else:
                    cmd += ['-o', '%s="%s"' % (prop, value)]
        if origin and action == 'clone':
            cmd.append(origin)
        cmd.append(self.name)
        # NOTE(review): command is passed as one string, so run_command shlex-splits
        # it; property values containing whitespace may not survive — verify.
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)
    def destroy(self):
        """Destroy the dataset and (via -R) all dependents; honours check mode."""
        if self.module.check_mode:
            self.changed = True
            return
        cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)
    def set_property(self, prop, value):
        """Set a single zfs property on the dataset; honours check mode."""
        if self.module.check_mode:
            self.changed = True
            return
        # Passed as a list (not a joined string), so no shell-splitting of the value.
        cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
        (rc, out, err) = self.module.run_command(cmd)
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)
    def set_properties_if_changed(self):
        """Apply only the requested properties whose current value differs."""
        current_properties = self.get_current_properties()
        for prop, value in self.properties.items():
            if current_properties.get(prop, None) != value:
                self.set_property(prop, value)
    def get_current_properties(self):
        """Return a dict of locally-set properties as reported by ``zfs get``."""
        cmd = [self.zfs_cmd, 'get', '-H']
        if self.enhanced_sharing:
            cmd += ['-e']
        cmd += ['all', self.name]
        rc, out, err = self.module.run_command(" ".join(cmd))
        properties = dict()
        # -H output is tab-separated: name, property, value, source.
        for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
            if source == 'local':
                properties[prop] = value
        # Add alias for enhanced sharing properties
        if self.enhanced_sharing:
            properties['sharenfs'] = properties.get('share.nfs', None)
            properties['sharesmb'] = properties.get('share.smb', None)
        return properties
def main():
    """Module entry point: ensure the dataset is present/absent and apply properties."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['absent', 'present']),
            origin=dict(type='str', default=None),
            extra_zfs_properties=dict(type='dict', default={}),
        ),
        supports_check_mode=True,
    )
    state = module.params.get('state')
    name = module.params.get('name')
    # A snapshot cannot itself be cloned from an origin.
    if module.params.get('origin') and '@' in name:
        module.fail_json(msg='cannot specify origin when operating on a snapshot')
    # Reverse the boolification of zfs properties
    # (YAML turns bare on/off into booleans; the zfs CLI expects the literal strings).
    for prop, value in module.params['extra_zfs_properties'].items():
        if isinstance(value, bool):
            if value is True:
                module.params['extra_zfs_properties'][prop] = 'on'
            else:
                module.params['extra_zfs_properties'][prop] = 'off'
        else:
            module.params['extra_zfs_properties'][prop] = value  # no-op for non-booleans
    result = dict(
        name=name,
        state=state,
    )
    zfs = Zfs(module, name, module.params['extra_zfs_properties'])
    if state == 'present':
        if zfs.exists():
            zfs.set_properties_if_changed()
        else:
            zfs.create()
    elif state == 'absent':
        if zfs.exists():
            zfs.destroy()
    # Echo the requested properties back to the caller alongside changed status.
    result.update(zfs.properties)
    result['changed'] = zfs.changed
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
dgladkov/django | tests/null_fk_ordering/tests.py | 381 | 2012 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Author, Comment, Forum, Post, SystemInfo
class NullFkOrderingTests(TestCase):
    """Ordering across nullable foreign keys must not drop rows (regression #7512)."""
    def test_ordering_across_null_fk(self):
        """
        Regression test for #7512
        ordering across nullable Foreign Keys shouldn't exclude results
        """
        # One article with a NULL author, two with real authors.
        author_1 = Author.objects.create(name='Tom Jones')
        author_2 = Author.objects.create(name='Bob Smith')
        Article.objects.create(title='No author on this article')
        Article.objects.create(author=author_1, title='This article written by Tom Jones')
        Article.objects.create(author=author_2, title='This article written by Bob Smith')
        # We can't compare results directly (since different databases sort NULLs to
        # different ends of the ordering), but we can check that all results are
        # returned.
        self.assertEqual(len(list(Article.objects.all())), 3)
        # Same idea across a deeper chain of nullable FKs:
        # comment -> post -> forum -> system_info, with some links NULL.
        s = SystemInfo.objects.create(system_name='System Info')
        f = Forum.objects.create(system_info=s, forum_name='First forum')
        p = Post.objects.create(forum=f, title='First Post')
        Comment.objects.create(post=p, comment_text='My first comment')
        Comment.objects.create(comment_text='My second comment')
        s2 = SystemInfo.objects.create(system_name='More System Info')
        f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
        p2 = Post.objects.create(forum=f2, title='Second Post')
        Comment.objects.create(comment_text='Another first comment')
        Comment.objects.create(post=p2, comment_text='Another second comment')
        # We have to test this carefully. Some databases sort NULL values before
        # everything else, some sort them afterwards. So we extract the ordered list
        # and check the length. Before the fix, this list was too short (some values
        # were omitted).
        self.assertEqual(len(list(Comment.objects.all())), 4)
| bsd-3-clause |
dfang/odoo | addons/account_asset/wizard/asset_modify.py | 25 | 4088 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from odoo import api, fields, models, _
from odoo.osv.orm import setup_modifiers
class AssetModify(models.TransientModel):
    """Wizard that changes an asset's depreciation duration and logs the old values."""
    _name = 'asset.modify'
    _description = 'Modify Asset'
    # Wizard fields mirroring the asset's depreciation settings.
    name = fields.Text(string='Reason', required=True)
    method_number = fields.Integer(string='Number of Depreciations', required=True)
    method_period = fields.Integer(string='Period Length')
    method_end = fields.Date(string='Ending date')
    asset_method_time = fields.Char(compute='_get_asset_method_time', string='Asset Method Time', readonly=True)
    @api.one
    def _get_asset_method_time(self):
        # Expose the active asset's method_time ('number' or 'end') to the view.
        if self.env.context.get('active_id'):
            asset = self.env['account.asset.asset'].browse(self.env.context.get('active_id'))
            self.asset_method_time = asset.method_time
    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """Hide whichever duration field does not apply to the asset's method_time."""
        result = super(AssetModify, self).fields_view_get(view_id, view_type, toolbar=toolbar, submenu=submenu)
        asset_id = self.env.context.get('active_id')
        active_model = self.env.context.get('active_model')
        if active_model == 'account.asset.asset' and asset_id:
            asset = self.env['account.asset.asset'].browse(asset_id)
            # Patch the view arch in place: 'number' assets don't use method_end,
            # 'end' assets don't use method_number.
            doc = etree.XML(result['arch'])
            if asset.method_time == 'number' and doc.xpath("//field[@name='method_end']"):
                node = doc.xpath("//field[@name='method_end']")[0]
                node.set('invisible', '1')
                setup_modifiers(node, result['fields']['method_end'])
            elif asset.method_time == 'end' and doc.xpath("//field[@name='method_number']"):
                node = doc.xpath("//field[@name='method_number']")[0]
                node.set('invisible', '1')
                setup_modifiers(node, result['fields']['method_number'])
            result['arch'] = etree.tostring(doc)
        return result
    @api.model
    def default_get(self, fields):
        """Prefill the wizard from the active asset's current depreciation settings."""
        res = super(AssetModify, self).default_get(fields)
        asset_id = self.env.context.get('active_id')
        asset = self.env['account.asset.asset'].browse(asset_id)
        if 'name' in fields:
            res.update({'name': asset.name})
        # Only prefill the field that matches the asset's time method.
        if 'method_number' in fields and asset.method_time == 'number':
            res.update({'method_number': asset.method_number})
        if 'method_period' in fields:
            res.update({'method_period': asset.method_period})
        if 'method_end' in fields and asset.method_time == 'end':
            res.update({'method_end': asset.method_end})
        if self.env.context.get('active_id'):
            active_asset = self.env['account.asset.asset'].browse(self.env.context.get('active_id'))
            res['asset_method_time'] = active_asset.method_time
        return res
    @api.multi
    def modify(self):
        """ Modifies the duration of asset for calculating depreciation
        and maintains the history of old values, in the chatter.
        """
        asset_id = self.env.context.get('active_id', False)
        asset = self.env['account.asset.asset'].browse(asset_id)
        # Snapshot the old values so the change can be tracked in the chatter.
        old_values = {
            'method_number': asset.method_number,
            'method_period': asset.method_period,
            'method_end': asset.method_end,
        }
        asset_vals = {
            'method_number': self.method_number,
            'method_period': self.method_period,
            'method_end': self.method_end,
        }
        asset.write(asset_vals)
        # Recompute the depreciation lines with the new duration.
        asset.compute_depreciation_board()
        tracked_fields = self.env['account.asset.asset'].fields_get(['method_number', 'method_period', 'method_end'])
        changes, tracking_value_ids = asset._message_track(tracked_fields, old_values)
        if changes:
            asset.message_post(subject=_('Depreciation board modified'), body=self.name, tracking_value_ids=tracking_value_ids)
        return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
rec/DMXIS | Macros/Python/telnetlib.py | 8 | 22465 | r"""TELNET client class.
Based on RFC 854: TELNET Protocol Specification, by J. Postel and
J. Reynolds
Example:
>>> from telnetlib import Telnet
>>> tn = Telnet('www.python.org', 79) # connect to finger port
>>> tn.write('guido\r\n')
>>> print tn.read_all()
Login Name TTY Idle When Where
guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
>>>
Note that read_all() won't read until eof -- it just reads some data
-- but it guarantees to read at least one byte unless EOF is hit.
It is possible to pass a Telnet object to select.select() in order to
wait until more data is available. Note that in this case,
read_eager() may return '' even if there was data on the socket,
because the protocol negotiation may have eaten the data. This is why
EOFError is needed in some cases to distinguish between "no data" and
"connection closed" (since the socket also appears ready for reading
when it is closed).
To do:
- option negotiation
- timeout should be intrinsic to the connection object instead of an
option on one of the read calls only
"""
# Imported modules
import sys
import socket
import select
__all__ = ["Telnet"]
# Tunable parameters
DEBUGLEVEL = 0
# Telnet protocol defaults
TELNET_PORT = 23
# Telnet protocol characters (don't change)
IAC = chr(255) # "Interpret As Command"
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)
SE = chr(240) # Subnegotiation End
NOP = chr(241) # No Operation
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt process
AO = chr(245) # Abort output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation Begin
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end or record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
NOOPT = chr(0)
class Telnet:
    """Telnet interface class.
    An instance of this class represents a connection to a telnet
    server. The instance is initially not connected; the open()
    method must be used to establish a connection. Alternatively, the
    host name and optional port number can be passed to the
    constructor, too.
    Don't try to reopen an already connected instance.
    This class has many read_*() methods. Note that some of them
    raise EOFError when the end of the connection is read, because
    they can return an empty string for other reasons. See the
    individual doc strings.
    read_until(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.
    read_all()
        Read all data until EOF; may block.
    read_some()
        Read at least one byte or EOF; may block.
    read_very_eager()
        Read all data available already queued or on the socket,
        without blocking.
    read_eager()
        Read either data already queued or some data available on the
        socket, without blocking.
    read_lazy()
        Read all data in the raw queue (processing it first), without
        doing any socket I/O.
    read_very_lazy()
        Reads all data in the cooked queue, without doing any socket
        I/O.
    read_sb_data()
        Reads available data between SB ... SE sequence. Don't block.
    set_option_negotiation_callback(callback)
        Each time a telnet option is read on the input flow, this callback
        (if set) is called with the following parameters :
        callback(telnet socket, command, option)
            option will be chr(0) when there is no option.
        No other action is done afterwards by telnetlib.
    """
    def __init__(self, host=None, port=0,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """Constructor.
        When called without arguments, create an unconnected instance.
        With a hostname argument, it connects the instance; port number
        and timeout are optional.
        """
        self.debuglevel = DEBUGLEVEL
        self.host = host
        self.port = port
        self.timeout = timeout
        self.sock = None
        self.rawq = ''            # bytes received but not yet protocol-processed
        self.irawq = 0            # index of next unconsumed char in rawq
        self.cookedq = ''         # data after telnet option processing
        self.eof = 0
        self.iacseq = '' # Buffer for IAC sequence.
        self.sb = 0 # flag for SB and SE sequence.
        self.sbdataq = ''
        self.option_callback = None
        if host is not None:
            self.open(host, port, timeout)
    def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """Connect to a host.
        The optional second argument is the port number, which
        defaults to the standard telnet port (23).
        Don't try to reopen an already connected instance.
        """
        self.eof = 0
        if not port:
            port = TELNET_PORT
        self.host = host
        self.port = port
        self.timeout = timeout
        self.sock = socket.create_connection((host, port), timeout)
    def __del__(self):
        """Destructor -- close the connection."""
        self.close()
    def msg(self, msg, *args):
        """Print a debug message, when the debug level is > 0.
        If extra arguments are present, they are substituted in the
        message using the standard string formatting operator.
        """
        if self.debuglevel > 0:
            print 'Telnet(%s,%d):' % (self.host, self.port),
            if args:
                print msg % args
            else:
                print msg
    def set_debuglevel(self, debuglevel):
        """Set the debug level.
        The higher it is, the more debug output you get (on sys.stdout).
        """
        self.debuglevel = debuglevel
    def close(self):
        """Close the connection."""
        if self.sock:
            self.sock.close()
        self.sock = 0
        self.eof = 1
        self.iacseq = ''
        self.sb = 0
    def get_socket(self):
        """Return the socket object used internally."""
        return self.sock
    def fileno(self):
        """Return the fileno() of the socket object used internally."""
        return self.sock.fileno()
    def write(self, buffer):
        """Write a string to the socket, doubling any IAC characters.
        Can block if the connection is blocked. May raise
        socket.error if the connection is closed.
        """
        # IAC (0xFF) is the telnet escape byte; literal data bytes must be doubled.
        if IAC in buffer:
            buffer = buffer.replace(IAC, IAC+IAC)
        self.msg("send %r", buffer)
        self.sock.sendall(buffer)
    def read_until(self, match, timeout=None):
        """Read until a given string is encountered or until timeout.
        When no match is found, return whatever is available instead,
        possibly the empty string. Raise EOFError if the connection
        is closed and no cooked data is available.
        """
        n = len(match)
        self.process_rawq()
        # Fast path: match may already be in the cooked queue.
        i = self.cookedq.find(match)
        if i >= 0:
            i = i+n
            buf = self.cookedq[:i]
            self.cookedq = self.cookedq[i:]
            return buf
        s_reply = ([self], [], [])
        s_args = s_reply
        if timeout is not None:
            s_args = s_args + (timeout,)
            from time import time
            time_start = time()
        # Keep select()ing for more data until match, EOF, or timeout.
        while not self.eof and select.select(*s_args) == s_reply:
            # Resume the search just before the tail, in case match straddles reads.
            i = max(0, len(self.cookedq)-n)
            self.fill_rawq()
            self.process_rawq()
            i = self.cookedq.find(match, i)
            if i >= 0:
                i = i+n
                buf = self.cookedq[:i]
                self.cookedq = self.cookedq[i:]
                return buf
            if timeout is not None:
                elapsed = time() - time_start
                if elapsed >= timeout:
                    break
                s_args = s_reply + (timeout-elapsed,)
        return self.read_very_lazy()
    def read_all(self):
        """Read all data until EOF; block until connection closed."""
        self.process_rawq()
        while not self.eof:
            self.fill_rawq()
            self.process_rawq()
        buf = self.cookedq
        self.cookedq = ''
        return buf
    def read_some(self):
        """Read at least one byte of cooked data unless EOF is hit.
        Return '' if EOF is hit. Block if no data is immediately
        available.
        """
        self.process_rawq()
        while not self.cookedq and not self.eof:
            self.fill_rawq()
            self.process_rawq()
        buf = self.cookedq
        self.cookedq = ''
        return buf
    def read_very_eager(self):
        """Read everything that's possible without blocking in I/O (eager).
        Raise EOFError if connection closed and no cooked data
        available. Return '' if no cooked data available otherwise.
        Don't block unless in the midst of an IAC sequence.
        """
        self.process_rawq()
        while not self.eof and self.sock_avail():
            self.fill_rawq()
            self.process_rawq()
        return self.read_very_lazy()
    def read_eager(self):
        """Read readily available data.
        Raise EOFError if connection closed and no cooked data
        available. Return '' if no cooked data available otherwise.
        Don't block unless in the midst of an IAC sequence.
        """
        self.process_rawq()
        while not self.cookedq and not self.eof and self.sock_avail():
            self.fill_rawq()
            self.process_rawq()
        return self.read_very_lazy()
    def read_lazy(self):
        """Process and return data that's already in the queues (lazy).
        Raise EOFError if connection closed and no data available.
        Return '' if no cooked data available otherwise. Don't block
        unless in the midst of an IAC sequence.
        """
        self.process_rawq()
        return self.read_very_lazy()
    def read_very_lazy(self):
        """Return any data available in the cooked queue (very lazy).
        Raise EOFError if connection closed and no data available.
        Return '' if no cooked data available otherwise. Don't block.
        """
        buf = self.cookedq
        self.cookedq = ''
        if not buf and self.eof and not self.rawq:
            raise EOFError, 'telnet connection closed'
        return buf
    def read_sb_data(self):
        """Return any data available in the SB ... SE queue.
        Return '' if no SB ... SE available. Should only be called
        after seeing a SB or SE command. When a new SB command is
        found, old unread SB data will be discarded. Don't block.
        """
        buf = self.sbdataq
        self.sbdataq = ''
        return buf
    def set_option_negotiation_callback(self, callback):
        """Provide a callback function called after each receipt of a telnet option."""
        self.option_callback = callback
    def process_rawq(self):
        """Transfer from raw queue to cooked queue.
        Set self.eof when connection is closed. Don't block unless in
        the midst of an IAC sequence.
        """
        # buf[0] collects regular data, buf[1] collects subnegotiation (SB..SE) data;
        # self.sb selects which one is being filled.
        buf = ['', '']
        try:
            while self.rawq:
                c = self.rawq_getchar()
                if not self.iacseq:
                    if c == theNULL:
                        continue
                    # Drop XON (DC1, \021) flow-control characters.
                    if c == "\021":
                        continue
                    if c != IAC:
                        buf[self.sb] = buf[self.sb] + c
                        continue
                    else:
                        self.iacseq += c
                elif len(self.iacseq) == 1:
                    # 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
                    if c in (DO, DONT, WILL, WONT):
                        self.iacseq += c
                        continue
                    self.iacseq = ''
                    if c == IAC:
                        # Doubled IAC is an escaped literal 0xFF data byte.
                        buf[self.sb] = buf[self.sb] + c
                    else:
                        if c == SB: # SB ... SE start.
                            self.sb = 1
                            self.sbdataq = ''
                        elif c == SE:
                            self.sb = 0
                            self.sbdataq = self.sbdataq + buf[1]
                            buf[1] = ''
                            if self.option_callback:
                                # Callback is supposed to look into
                                # the sbdataq
                                self.option_callback(self.sock, c, NOOPT)
                        else:
                            # We can't offer automatic processing of
                            # suboptions. Alas, we should not get any
                            # unless we did a WILL/DO before.
                            self.msg('IAC %d not recognized' % ord(c))
                elif len(self.iacseq) == 2:
                    cmd = self.iacseq[1]
                    self.iacseq = ''
                    opt = c
                    if cmd in (DO, DONT):
                        self.msg('IAC %s %d',
                            cmd == DO and 'DO' or 'DONT', ord(opt))
                        if self.option_callback:
                            self.option_callback(self.sock, cmd, opt)
                        else:
                            # Default: refuse every option the peer asks us to enable.
                            self.sock.sendall(IAC + WONT + opt)
                    elif cmd in (WILL, WONT):
                        self.msg('IAC %s %d',
                            cmd == WILL and 'WILL' or 'WONT', ord(opt))
                        if self.option_callback:
                            self.option_callback(self.sock, cmd, opt)
                        else:
                            # Default: tell the peer not to enable options on its side.
                            self.sock.sendall(IAC + DONT + opt)
        except EOFError: # raised by self.rawq_getchar()
            self.iacseq = '' # Reset on EOF
            self.sb = 0
            pass
        self.cookedq = self.cookedq + buf[0]
        self.sbdataq = self.sbdataq + buf[1]
    def rawq_getchar(self):
        """Get next char from raw queue.
        Block if no data is immediately available. Raise EOFError
        when connection is closed.
        """
        if not self.rawq:
            self.fill_rawq()
            if self.eof:
                raise EOFError
        c = self.rawq[self.irawq]
        self.irawq = self.irawq + 1
        if self.irawq >= len(self.rawq):
            self.rawq = ''
            self.irawq = 0
        return c
    def fill_rawq(self):
        """Fill raw queue from exactly one recv() system call.
        Block if no data is immediately available. Set self.eof when
        connection is closed.
        """
        if self.irawq >= len(self.rawq):
            self.rawq = ''
            self.irawq = 0
        # The buffer size should be fairly small so as to avoid quadratic
        # behavior in process_rawq() above
        buf = self.sock.recv(50)
        self.msg("recv %r", buf)
        self.eof = (not buf)  # empty recv() means the peer closed the connection
        self.rawq = self.rawq + buf
    def sock_avail(self):
        """Test whether data is available on the socket."""
        return select.select([self], [], [], 0) == ([self], [], [])
    def interact(self):
        """Interaction function, emulates a very dumb telnet client."""
        # Windows has no select() on stdin, so use the threaded variant there.
        if sys.platform == "win32":
            self.mt_interact()
            return
        while 1:
            rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
            if self in rfd:
                try:
                    text = self.read_eager()
                except EOFError:
                    print '*** Connection closed by remote host ***'
                    break
                if text:
                    sys.stdout.write(text)
                    sys.stdout.flush()
            if sys.stdin in rfd:
                line = sys.stdin.readline()
                if not line:
                    break
                self.write(line)
    def mt_interact(self):
        """Multithreaded version of interact()."""
        import thread
        # One thread echoes remote output; this thread forwards stdin lines.
        thread.start_new_thread(self.listener, ())
        while 1:
            line = sys.stdin.readline()
            if not line:
                break
            self.write(line)
    def listener(self):
        """Helper for mt_interact() -- this executes in the other thread."""
        while 1:
            try:
                data = self.read_eager()
            except EOFError:
                print '*** Connection closed by remote host ***'
                return
            if data:
                sys.stdout.write(data)
            else:
                sys.stdout.flush()
    def expect(self, list, timeout=None):
        """Read until one from a list of a regular expressions matches.
        The first argument is a list of regular expressions, either
        compiled (re.RegexObject instances) or uncompiled (strings).
        The optional second argument is a timeout, in seconds; default
        is no timeout.
        Return a tuple of three items: the index in the list of the
        first regular expression that matches; the match object
        returned; and the text read up till and including the match.
        If EOF is read and no text was read, raise EOFError.
        Otherwise, when nothing matches, return (-1, None, text) where
        text is the text received so far (may be the empty string if a
        timeout happened).
        If a regular expression ends with a greedy match (e.g. '.*')
        or if more than one expression can match the same input, the
        results are undeterministic, and may depend on the I/O timing.
        """
        # NOTE: 'list' and 're' shadow the builtin/module names (historic stdlib code).
        re = None
        list = list[:]
        indices = range(len(list))
        for i in indices:
            # Compile any plain-string patterns; import re lazily, only if needed.
            if not hasattr(list[i], "search"):
                if not re: import re
                list[i] = re.compile(list[i])
        if timeout is not None:
            from time import time
            time_start = time()
        while 1:
            self.process_rawq()
            for i in indices:
                m = list[i].search(self.cookedq)
                if m:
                    e = m.end()
                    text = self.cookedq[:e]
                    self.cookedq = self.cookedq[e:]
                    return (i, m, text)
            if self.eof:
                break
            if timeout is not None:
                elapsed = time() - time_start
                if elapsed >= timeout:
                    break
                s_args = ([self.fileno()], [], [], timeout-elapsed)
                r, w, x = select.select(*s_args)
                if not r:
                    break
            self.fill_rawq()
        text = self.read_very_lazy()
        if not text and self.eof:
            raise EOFError
        return (-1, None, text)
def test():
    """Test program for telnetlib.
    Usage: python telnetlib.py [-d] ... [host [port]]
    Default host is localhost; default port is 23.
    """
    debuglevel = 0
    # Each leading -d on the command line bumps the debug level by one.
    while sys.argv[1:] and sys.argv[1] == '-d':
        debuglevel = debuglevel+1
        del sys.argv[1]
    host = 'localhost'
    if sys.argv[1:]:
        host = sys.argv[1]
    port = 0
    if sys.argv[2:]:
        portstr = sys.argv[2]
        try:
            port = int(portstr)
        except ValueError:
            # Not numeric: resolve a service name such as 'telnet' to its TCP port.
            port = socket.getservbyname(portstr, 'tcp')
    tn = Telnet()
    tn.set_debuglevel(debuglevel)
    tn.open(host, port, timeout=0.5)
    tn.interact()
    tn.close()
test()
| artistic-2.0 |
muminoff/tarjimonlar-analytics | fabfile.py | 13 | 1088 | #!/usr/bin/env python
"""Fabfile using only commands from buedafab (https://github.com/bueda/ops) to
deploy this app to remote servers.
"""
import os
from fabric.api import *
from buedafab.test import (test, tornado_test_runner as _tornado_test_runner,
lint)
from buedafab.deploy.types import tornado_deploy as deploy
from buedafab.environments import development, staging, production, localhost
from buedafab.tasks import (setup, restart_webserver, rollback, enable,
disable, maintenancemode, rechef)
# For a description of these attributes, see https://github.com/bueda/ops
# Short name of this app; interpolated into the paths and repo URLs below.
env.unit = "boilerplate"
# Deployment target directory on the remote hosts.
env.path = "/var/webapps/%(unit)s" % env
# Git remotes: SSH URL used for deploys, HTTP URL for browsing.
env.scm = "git@github.com:bueda/%(unit)s.git" % env
env.scm_http_url = "http://github.com/bueda/%(unit)s" % env
# Absolute path of the directory containing this fabfile.
env.root_dir = os.path.abspath(os.path.dirname(__file__))
# Use the Tornado-specific test runner imported above.
env.test_runner = _tornado_test_runner
# pip requirement files, split by environment.
env.pip_requirements = ["requirements/common.txt",
        "vendor/allo/pip-requirements.txt",]
env.pip_requirements_dev = ["requirements/dev.txt",]
env.pip_requirements_production = ["requirements/production.txt",]
| bsd-2-clause |
subutai/nupic | examples/sp/hello_sp.py | 25 | 5011 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""A simple program that demonstrates the working of the spatial pooler"""
import numpy as np
import random
from nupic.bindings.algorithms import SpatialPooler as SP
uintType = "uint32"
class Example(object):
  """A class to hold our code.

  TODO: Get rid of this class, it just makes it more difficult to read the
  code.
  """

  def __init__(self, inputDimensions, columnDimensions):
    """
    Parameters:
    ----------
    inputDimensions: The size of the input. (m,n) will give a size m x n
    columnDimensions: The size of the 2 dimensional array of columns
    """
    self.inputDimensions = inputDimensions
    self.columnDimensions = columnDimensions
    # Total number of input bits / columns: product over all dimensions.
    self.inputSize = np.array(inputDimensions).prod()
    self.columnNumber = np.array(columnDimensions).prod()
    self.inputArray = np.zeros(self.inputSize, dtype=uintType)
    self.activeArray = np.zeros(self.columnNumber, dtype=uintType)

    # Seed Python's RNG so successive runs are reproducible.
    random.seed(1)

    self.sp = SP(self.inputDimensions,
                 self.columnDimensions,
                 potentialRadius = self.inputSize,
                 numActiveColumnsPerInhArea = int(0.02*self.columnNumber),
                 globalInhibition = True,
                 seed = 1,
                 synPermActiveInc = 0.01,
                 synPermInactiveDec = 0.008)

  def createInput(self):
    """create a random input vector"""
    # print() with one parenthesized argument runs under both Python 2 and 3
    # (the original Python-2-only print statements broke on Python 3).
    print("-" * 70 + "Creating a random input vector" + "-" * 70)

    # Clear the inputArray to zero before creating a new input vector.
    self.inputArray[0:] = 0

    for i in range(self.inputSize):
      # randrange(2) returns 0 or 1.
      self.inputArray[i] = random.randrange(2)

  def run(self):
    """Run the spatial pooler with the input vector"""
    print("-" * 80 + "Computing the SDR" + "-" * 80)

    # activeArray[column]=1 if column is active after spatial pooling.
    self.sp.compute(self.inputArray, True, self.activeArray)

    print(self.activeArray.nonzero())

  def addNoise(self, noiseLevel):
    """Flip the value of a fraction of input bits (add noise)

    :param noiseLevel: The fraction (0..1) of total input bits that should be
        flipped; e.g. 0.1 flips 10% of the bits.
    """
    for _ in range(int(noiseLevel * self.inputSize)):
      # random.random() returns a float between 0 and 1; positions may
      # repeat, so fewer than noiseLevel*inputSize distinct bits can flip.
      randomPosition = int(random.random() * self.inputSize)

      # Flipping the bit at the randomly picked position.
      if self.inputArray[randomPosition] == 1:
        self.inputArray[randomPosition] = 0
      else:
        self.inputArray[randomPosition] = 1

      # Uncomment the following line to know which positions had been flipped.
      # print("The value at " + str(randomPosition) + " has been flipped")
# Demo script: three lessons showing how the spatial pooler's SDR responds
# to random, identical, and slightly-noisy input vectors.
example = Example((32, 32), (64, 64))

# Lesson 1
# print() with a single parenthesized argument runs under Python 2 and 3
# (the original Python-2-only print statements broke on Python 3).
print("\n \nFollowing columns represent the SDR")
print("Different set of columns each time since we randomize the input")
print("Lesson - different input vectors give different SDRs\n\n")

# Trying random vectors.
for i in range(3):
  example.createInput()
  example.run()

# Lesson 2
print("\n\nIdentical SDRs because we give identical inputs")
print("Lesson - identical inputs give identical SDRs\n\n")
print("-" * 75 + "Using identical input vectors" + "-" * 75)

# Trying identical vectors.
for i in range(2):
  example.run()

# Lesson 3
print("\n\nNow we are changing the input vector slightly.")
print("We change a small percentage of 1s to 0s and 0s to 1s.")
print("The resulting SDRs are similar, but not identical to the original SDR")
print("Lesson - Similar input vectors give similar SDRs\n\n")

# Adding 10% noise to the input vector.
# Notice how the output SDR hardly changes at all.
print("-" * 75 + "After adding 10% noise to the input vector" + "-" * 75)
example.addNoise(0.1)
example.run()

# Adding another 20% noise to the already modified input vector.
# The output SDR should differ considerably from that of the previous output.
print("-" * 75 + "After adding another 20% noise to the input vector" + "-" * 75)
example.addNoise(0.2)
example.run()
| agpl-3.0 |
chuan9/chromium-crosswalk | tools/deep_memory_profiler/accumulate.py | 100 | 9536 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# A script to accumulate values from the 'dmprof cat' command into CSV or else.
#
# Usage:
# ./accumulate.py -f <format> -t <template-name> < input.json > output
#
# <format> is one of "csv", "json", and "tree". If "csv" or "json" is given,
# accumulate.py dumps a similar file to "dmprof csv|json". If "tree" is given,
# accumulate.py dumps a human-readable breakdown tree.
#
# <template-name> is a label in templates.json.
import datetime
import json
import logging
import optparse
import sys
from lib.ordered_dict import OrderedDict
LOGGER = logging.getLogger('dmprof-accumulate')
def visit_in_template(template, snapshot, depth):
  """Visits all categories via a given template.

  This function is not used. It's a sample function to traverse a template.

  Args:
    template: A (world, breakdown, rules) triple from a dmprof template tree.
    snapshot: A snapshot dict from a dmprof cat JSON file.
    depth: Current recursion depth; controls the printed indentation.
  """
  world = template[0]
  breakdown = template[1]
  rules = template[2]

  # .items() (not the Python-2-only .iteritems()) and parenthesized print()
  # so this runs under both Python 2 and 3.
  for rule, _ in snapshot[world]['breakdown'][breakdown].items():
    print((' ' * depth) + rule)
    if rule in rules:
      # The rule has a sub-template: recurse one level deeper.
      visit_in_template(rules[rule], snapshot, depth + 1)
def accumulate(template, snapshot, units_dict, target_units):
  """Accumulates units in a JSON |snapshot| with applying a given |template|.

  Args:
      template: A template tree included in a dmprof cat JSON file.
      snapshot: A snapshot in a dmprof cat JSON file.
      units_dict: A dict of units in worlds.
      target_units: A set of unit ids which are a target of this accumulation.

  Returns:
      A (category_tree, total, remainder_units) triple: the accumulated
      breakdown tree, the total size accounted for by matched rules, and the
      target units no rule matched.
  """
  world = template[0]
  breakdown = template[1]
  rules = template[2]

  # Units not matched by any rule yet.
  remainder_units = target_units.copy()
  category_tree = OrderedDict()
  total = 0

  # .items() instead of the Python-2-only .iteritems(); likewise
  # sys.stderr.write() below replaces the "print >>" statement, so the
  # function runs under both Python 2 and 3 with identical output.
  for rule, match in snapshot[world]['breakdown'][breakdown].items():
    if 'hidden' in match and match['hidden']:
      continue
    matched_units = set(match['units']).intersection(target_units)
    subtotal = 0
    for unit_id in matched_units:
      subtotal += units_dict[world][unit_id]
    total += subtotal
    remainder_units = remainder_units.difference(matched_units)
    if rule not in rules:
      # A category matched with |rule| is a leaf of the breakdown tree.
      # It is NOT broken down more.
      category_tree[rule] = subtotal
      continue

    # A category matched with |rule| is broken down more.
    subtemplate = rules[rule]
    subworld = subtemplate[0]
    subbreakdown = subtemplate[1]

    if subworld == world:
      # Break down in the same world: consider units.
      category_tree[rule], accounted_total, subremainder_units = accumulate(
          subtemplate, snapshot, units_dict, matched_units)
      subremainder_total = 0
      if subremainder_units:
        for unit_id in subremainder_units:
          subremainder_total += units_dict[world][unit_id]
        # The None key holds the size the sub-breakdown did not match.
        category_tree[rule][None] = subremainder_total
      if subtotal != accounted_total + subremainder_total:
        sys.stderr.write(
            'WARNING: Sum of %s:%s is different from %s by %d bytes.\n' % (
                subworld, subbreakdown, rule,
                subtotal - (accounted_total + subremainder_total)))
    else:
      # Break down in a different world: consider only the total size.
      category_tree[rule], accounted_total, _ = accumulate(
          subtemplate, snapshot, units_dict, set(units_dict[subworld].keys()))
      if subtotal >= accounted_total:
        category_tree[rule][None] = subtotal - accounted_total
      else:
        sys.stderr.write(
            'WARNING: Sum of %s:%s is larger than %s by %d bytes.\n' % (
                subworld, subbreakdown, rule, accounted_total - subtotal))
        sys.stderr.write(
            'WARNING: Assuming remainder of %s is 0.\n' % rule)
        category_tree[rule][None] = 0

  return category_tree, total, remainder_units
def flatten(category_tree, header=''):
  """Flattens a category tree into a flat list of (label, size) pairs.

  Nested levels are joined with '>' into a single label; a falsy rule key
  (notably the None remainder key) is rendered as 'remaining'.
  """
  result = []
  # .items() instead of the Python-2-only .iteritems() so this runs under
  # both Python 2 and 3.
  for rule, sub in category_tree.items():
    if not rule:
      rule = 'remaining'
    if header:
      flattened_rule = header + '>' + rule
    else:
      flattened_rule = rule
    if isinstance(sub, dict) or isinstance(sub, OrderedDict):
      # Inner node: recurse with the accumulated label as the prefix.
      result.extend(flatten(sub, flattened_rule))
    else:
      # Leaf: a concrete size.
      result.append((flattened_rule, sub))
  return result
def print_category_tree(category_tree, output, depth=0):
  """Prints a category tree in a human-readable format.

  Each line is |depth| spaces of indent plus the single separator space that
  the original Python-2 'print >> output, x,' soft-space emitted; rewritten
  with output.write() so it runs under both Python 2 and 3 with identical
  output.
  """
  for label in category_tree:
    if (isinstance(category_tree[label], dict) or
        isinstance(category_tree[label], OrderedDict)):
      # Inner node: print the heading, then its children one level deeper.
      output.write('%s %s:\n' % (' ' * depth, label))
      print_category_tree(category_tree[label], output, depth + 1)
    else:
      output.write('%s %s: %d\n' % (' ' * depth, label, category_tree[label]))
def flatten_all_category_trees(category_trees):
  """Flatten every per-snapshot category tree into one row per snapshot.

  Returns a (labels, table) pair: the set of all flattened labels seen in
  any snapshot, and a list of OrderedDicts mapping label -> subtotal.
  """
  labels = set()
  table = []
  for tree in category_trees:
    row = OrderedDict()
    for label, subtotal in flatten(tree):
      labels.add(label)
      row[label] = subtotal
    table.append(row)
  return labels, table
def output_csv(output, category_trees, data, first_time, output_exponent):
  """Writes the accumulated snapshots as CSV to |output|.

  Columns are the sorted union of all flattened labels; the first column is
  the snapshot time in seconds relative to |first_time|.  Sizes are divided
  by 1024.0 (K) or 1024.0*1024.0 (M) per |output_exponent|.
  """
  flattened_labels, flattened_table = flatten_all_category_trees(category_trees)

  sorted_flattened_labels = sorted(flattened_labels)
  # output.write() replaces the Python-2-only "print >>" statement so this
  # runs under both Python 2 and 3.
  output.write(','.join(['second'] + sorted_flattened_labels) + '\n')
  for index, row in enumerate(flattened_table):
    values = [str(data['snapshots'][index]['time'] - first_time)]
    for label in sorted_flattened_labels:
      if label in row:
        divisor = 1
        if output_exponent.upper() == 'K':
          divisor = 1024.0
        elif output_exponent.upper() == 'M':
          divisor = 1024.0 * 1024.0
        # NOTE(review): with exponent 'B', divisor stays the int 1 and the
        # division result formatting differs between Python 2 and 3
        # ('5' vs '5.0') — pre-existing behavior, kept as-is.
        values.append(str(row[label] / divisor))
      else:
        values.append('0')
    output.write(','.join(values) + '\n')
def output_json(output, category_trees, data, first_time, template_label):
  """Writes the accumulated snapshots as a dmprof-style JSON document."""
  labels, table = flatten_all_category_trees(category_trees)

  snapshots_json = []
  for position, flattened_row in enumerate(table):
    entry = flattened_row.copy()
    snapshot_time = data['snapshots'][position]['time']
    entry['second'] = snapshot_time - first_time
    entry['dump_time'] = datetime.datetime.fromtimestamp(
        snapshot_time).strftime('%Y-%m-%d %H:%M:%S')
    snapshots_json.append(entry)

  document = {
      'version': 'JSON_DEEP_2',
      'policies': {
          template_label: {
              'legends': sorted(labels),
              'snapshots': snapshots_json
          }
      }
  }

  json.dump(document, output, indent=2, sort_keys=True)
def output_tree(output, category_trees):
  """Writes each snapshot's category tree in a human-readable format."""
  for index, category_tree in enumerate(category_trees):
    # output.write() replaces the Python-2-only "print >>" statement so this
    # runs under both Python 2 and 3.
    output.write('< Snapshot #%d >\n' % index)
    print_category_tree(category_tree, output, 1)
    output.write('\n')
def do_main(cat_input, output, template_label, output_format, output_exponent):
  """Does the main work: accumulate for every snapshot and print a result.

  Args:
      cat_input: File-like object holding the 'dmprof cat' JSON document.
      output: File-like object the result is written to.
      template_label: Template name to apply; falsy means the default one.
      output_format: One of 'csv', 'json' or 'tree'.
      output_exponent: One of 'B', 'K' or 'M' (bytes / kilobytes / megabytes).

  Raises:
      NotImplementedError: If the format or the exponent is not supported.
  """
  if output_format not in ['csv', 'json', 'tree']:
    raise NotImplementedError('The output format \"%s\" is not implemented.' %
                              output_format)

  if output_exponent.upper() not in ['B', 'K', 'M']:
    raise NotImplementedError('The exponent \"%s\" is not implemented.' %
                              output_exponent)

  # Keep key order so the breakdown trees stay in template order.
  data = json.loads(cat_input.read(), object_pairs_hook=OrderedDict)

  templates = data['templates']
  if not template_label:
    template_label = data['default_template']
  if template_label not in templates:
    LOGGER.error('A template \'%s\' is not found.' % template_label)
    return
  template = templates[template_label]

  category_trees = []
  first_time = None
  for snapshot in data['snapshots']:
    if not first_time:
      first_time = snapshot['time']

    # Build {world: {unit_id(int): size}} for this snapshot.
    units = {}
    for world_name in snapshot['worlds']:
      world_units = {}
      # .items() instead of the Python-2-only .iteritems() so this runs
      # under both Python 2 and 3.
      for unit_id, sizes in snapshot['worlds'][world_name]['units'].items():
        world_units[int(unit_id)] = sizes[0]
      units[world_name] = world_units

    category_tree, _, _ = accumulate(
        template, snapshot['worlds'], units, set(units[template[0]].keys()))
    category_trees.append(category_tree)

  if output_format == 'csv':
    output_csv(output, category_trees, data, first_time, output_exponent)
  elif output_format == 'json':
    output_json(output, category_trees, data, first_time, template_label)
  elif output_format == 'tree':
    output_tree(output, category_trees)
def main():
  """Command-line entry point: parse options, then run do_main on stdin."""
  # Emit INFO-and-above messages as plain text.
  LOGGER.setLevel(logging.DEBUG)
  stream_handler = logging.StreamHandler()
  stream_handler.setLevel(logging.INFO)
  stream_handler.setFormatter(logging.Formatter('%(message)s'))
  LOGGER.addHandler(stream_handler)

  option_parser = optparse.OptionParser()
  option_parser.add_option('-t', '--template', dest='template',
                           metavar='TEMPLATE',
                           help='Apply TEMPLATE to list up.')
  option_parser.add_option('-f', '--format', dest='format', default='csv',
                           help='Specify the output format: csv, json or tree.')
  option_parser.add_option('-e', '--exponent', dest='exponent', default='M',
                           help='Specify B (bytes), K (kilobytes) or M (megabytes).')
  options, _ = option_parser.parse_args(sys.argv)

  do_main(sys.stdin, sys.stdout,
          options.template, options.format, options.exponent)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
hassoon3/odoo | addons/portal/mail_mail.py | 320 | 2625 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv
from openerp.tools.translate import _
class mail_mail(osv.Model):
    """ Update of mail_mail class, to add the signin URL to notifications. """
    _inherit = 'mail.mail'

    def _get_partner_access_link(self, cr, uid, mail, partner=None, context=None):
        """ Generate URLs for links in mails:
            - partner is not an user: signup_url
            - partner is an user: fallback on classic URL

            :param mail: browse record of the mail being sent
            :param partner: optional browse record of the recipient partner
            :return: HTML snippet appended to the mail footer
        """
        if context is None:
            context = {}
        partner_obj = self.pool.get('res.partner')
        # Partners without any linked user get a signup link so they can
        # create a portal account; existing users fall back to the parent
        # class's standard access link.
        if partner and not partner.user_ids:
            # signup_valid=True allows a (new) signup token to be issued.
            contex_signup = dict(context, signup_valid=True)
            # Build the signup URL as superuser; after signup the user is
            # redirected to the record this mail is about.
            signup_url = partner_obj._get_signup_url_for_action(cr, SUPERUSER_ID, [partner.id],
                                                                action='mail.action_mail_redirect',
                                                                model=mail.model, res_id=mail.res_id,
                                                                context=contex_signup)[partner.id]
            return ", <span class='oe_mail_footer_access'><small>%(access_msg)s <a style='color:inherit' href='%(portal_link)s'>%(portal_msg)s</a></small></span>" % {
                'access_msg': _('access directly to'),
                'portal_link': signup_url,
                'portal_msg': '%s %s' % (context.get('model_name', ''), mail.record_name) if mail.record_name else _('your messages '),
            }
        else:
            return super(mail_mail, self)._get_partner_access_link(cr, uid, mail, partner=partner, context=context)
| agpl-3.0 |
Emilgardis/falloutsnip | Vendor/IronPython/Lib/rexec.py | 228 | 20148 | """Restricted execution facilities.
The class RExec exports methods r_exec(), r_eval(), r_execfile(), and
r_import(), which correspond roughly to the built-in operations
exec, eval(), execfile() and import, but executing the code in an
environment that only exposes those built-in operations that are
deemed safe. To this end, a modest collection of 'fake' modules is
created which mimics the standard modules by the same names. It is a
policy decision which built-in modules and operations are made
available; this module provides a reasonable default, but derived
classes can change the policies e.g. by overriding or extending class
variables like ok_builtin_modules or methods like make_sys().
XXX To do:
- r_open should allow writing tmp dir
- r_exec etc. with explicit globals/locals? (Use rexec("exec ... in ...")?)
"""
from warnings import warnpy3k
warnpy3k("the rexec module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import sys
import __builtin__
import os
import ihooks
import imp
__all__ = ["RExec"]
class FileBase:
    """Shared list of file-object methods considered safe to expose."""

    ok_file_methods = ('fileno', 'flush', 'isatty', 'read', 'readline',
            'readlines', 'seek', 'tell', 'write', 'writelines', 'xreadlines',
            '__iter__')


class FileWrapper(FileBase):

    # XXX This is just like a Bastion -- should use that!

    def __init__(self, f):
        # Copy onto this instance every safe method the wrapped file offers,
        # skipping any name the wrapper itself already provides.
        for method_name in self.ok_file_methods:
            if not hasattr(self, method_name) and hasattr(f, method_name):
                setattr(self, method_name, getattr(f, method_name))

    def close(self):
        # Flush pending output; the underlying file is deliberately not closed.
        self.flush()
# Source template for the forwarding methods generated on FileDelegate:
# each generated method forwards to getattr(self.mod, self.name).<method>.
TEMPLATE = """
def %s(self, *args):
    return getattr(self.mod, self.name).%s(*args)
"""

class FileDelegate(FileBase):
    """File-like object delegating every call to the file bound at
    <mod>.<name> at call time (used for restricted sys.std{in,out,err})."""

    def __init__(self, mod, name):
        # Remember where to look up the real file object on each call.
        self.mod = mod
        self.name = name

    # Generate one forwarding method (plus 'close') per safe method name.
    # NOTE: 'exec' as a statement is Python-2-only syntax.
    for m in FileBase.ok_file_methods + ('close',):
        exec TEMPLATE % (m, m)
class RHooks(ihooks.Hooks):
    """Import hooks that route all module operations through an RExec
    instance, so the restricted policy decides what may be imported."""

    def __init__(self, *args):
        # Hacks to support both old and new interfaces:
        # old interface was RHooks(rexec[, verbose])
        # new interface is RHooks([verbose])
        verbose = 0
        rexec = None
        if args and type(args[-1]) == type(0):
            verbose = args[-1]
            args = args[:-1]
        if args and hasattr(args[0], '__class__'):
            rexec = args[0]
            args = args[1:]
        if args:
            raise TypeError, "too many arguments"
        ihooks.Hooks.__init__(self, verbose)
        self.rexec = rexec

    def set_rexec(self, rexec):
        # Called by RExec instance to complete initialization
        self.rexec = rexec

    def get_suffixes(self):
        # Only the module file types the RExec policy allows.
        return self.rexec.get_suffixes()

    def is_builtin(self, name):
        return self.rexec.is_builtin(name)

    def init_builtin(self, name):
        # Import the real built-in, then hand back a sanitized copy.
        m = __import__(name)
        return self.rexec.copy_except(m, ())

    # These loading paths are deliberately disabled under restriction.
    def init_frozen(self, name): raise SystemError, "don't use this"
    def load_source(self, *args): raise SystemError, "don't use this"
    def load_compiled(self, *args): raise SystemError, "don't use this"
    def load_package(self, *args): raise SystemError, "don't use this"

    def load_dynamic(self, name, filename, file):
        # Extension modules go through the RExec whitelist check.
        return self.rexec.load_dynamic(name, filename, file)

    def add_module(self, name):
        return self.rexec.add_module(name)

    def modules_dict(self):
        # The restricted environment's private sys.modules dict.
        return self.rexec.modules

    def default_path(self):
        return self.rexec.modules['sys'].path
# XXX Backwards compatibility
# Old public names kept as aliases of the generic ihooks implementations.
RModuleLoader = ihooks.FancyModuleLoader
RModuleImporter = ihooks.ModuleImporter
class RExec(ihooks._Verbose):
    """Basic restricted execution framework.

    Code executed in this restricted environment will only have access to
    modules and functions that are deemed safe; you can subclass RExec to
    add or remove capabilities as desired.

    The RExec class can prevent code from performing unsafe operations like
    reading or writing disk files, or using TCP/IP sockets.  However, it does
    not protect against code using extremely large amounts of memory or
    processor time.

    """

    ok_path = tuple(sys.path)           # That's a policy decision

    # Built-in C extension modules considered safe to expose.
    ok_builtin_modules = ('audioop', 'array', 'binascii',
                          'cmath', 'errno', 'imageop',
                          'marshal', 'math', 'md5', 'operator',
                          'parser', 'select',
                          'sha', '_sre', 'strop', 'struct', 'time',
                          '_weakref')

    # Read-only os/posix names exposed through the fake os module.
    ok_posix_names = ('error', 'fstat', 'listdir', 'lstat', 'readlink',
                      'stat', 'times', 'uname', 'getpid', 'getppid',
                      'getcwd', 'getuid', 'getgid', 'geteuid', 'getegid')

    # Attributes exposed through the fake sys module.
    ok_sys_names = ('byteorder', 'copyright', 'exit', 'getdefaultencoding',
                    'getrefcount', 'hexversion', 'maxint', 'maxunicode',
                    'platform', 'ps1', 'ps2', 'version', 'version_info')

    # Builtins removed from the fake __builtin__ (replaced by r_* versions).
    nok_builtin_names = ('open', 'file', 'reload', '__import__')

    # Module file types that may be imported at all.
    ok_file_types = (imp.C_EXTENSION, imp.PY_SOURCE)

    def __init__(self, hooks = None, verbose = 0):
        """Returns an instance of the RExec class.

        The hooks parameter is an instance of the RHooks class or a subclass
        of it.  If it is omitted or None, the default RHooks class is
        instantiated.

        Whenever the RExec module searches for a module (even a built-in one)
        or reads a module's code, it doesn't actually go out to the file
        system itself.  Rather, it calls methods of an RHooks instance that
        was passed to or created by its constructor.  (Actually, the RExec
        object doesn't make these calls --- they are made by a module loader
        object that's part of the RExec object.  This allows another level of
        flexibility, which can be useful when changing the mechanics of
        import within the restricted environment.)

        By providing an alternate RHooks object, we can control the file
        system accesses made to import a module, without changing the
        actual algorithm that controls the order in which those accesses are
        made.  For instance, we could substitute an RHooks object that
        passes all filesystem requests to a file server elsewhere, via some
        RPC mechanism such as ILU.  Grail's applet loader uses this to support
        importing applets from a URL for a directory.

        If the verbose parameter is true, additional debugging output may be
        sent to standard output.

        """
        # Hard-disabled: rexec is known to be unsafe on Python 2.2+.
        raise RuntimeError, "This code is not secure in Python 2.2 and later"

        ihooks._Verbose.__init__(self, verbose)
        # XXX There's a circular reference here:
        self.hooks = hooks or RHooks(verbose)
        self.hooks.set_rexec(self)
        self.modules = {}
        self.ok_dynamic_modules = self.ok_builtin_modules
        # Keep only the allowed builtins actually compiled into this Python.
        list = []
        for mname in self.ok_builtin_modules:
            if mname in sys.builtin_module_names:
                list.append(mname)
        self.ok_builtin_modules = tuple(list)
        self.set_trusted_path()
        self.make_builtin()
        self.make_initial_modules()
        # make_sys must be last because it adds the already created
        # modules to its builtin_module_names
        self.make_sys()
        self.loader = RModuleLoader(self.hooks, verbose)
        self.importer = RModuleImporter(self.loader, verbose)

    def set_trusted_path(self):
        # Set the path from which dynamic modules may be loaded.
        # Those dynamic modules must also occur in ok_builtin_modules
        self.trusted_path = filter(os.path.isabs, sys.path)

    def load_dynamic(self, name, filename, file):
        # Refuse any extension module not explicitly whitelisted.
        if name not in self.ok_dynamic_modules:
            raise ImportError, "untrusted dynamic module: %s" % name
        if name in sys.modules:
            src = sys.modules[name]
        else:
            src = imp.load_dynamic(name, filename, file)
        # Hand restricted code a sanitized copy, never the real module.
        dst = self.copy_except(src, [])
        return dst

    def make_initial_modules(self):
        self.make_main()
        self.make_osname()

    # Helpers for RHooks

    def get_suffixes(self):
        # Only the (suffix, mode, type) triples whose type is allowed.
        return [item   # (suff, mode, type)
                for item in imp.get_suffixes()
                if item[2] in self.ok_file_types]

    def is_builtin(self, mname):
        return mname in self.ok_builtin_modules

    # The make_* methods create specific built-in modules

    def make_builtin(self):
        # Fake __builtin__: drop unsafe names, install r_* replacements.
        m = self.copy_except(__builtin__, self.nok_builtin_names)
        m.__import__ = self.r_import
        m.reload = self.r_reload
        m.open = m.file = self.r_open

    def make_main(self):
        self.add_module('__main__')

    def make_osname(self):
        # Fake os module: only the whitelisted read-only names, plus a
        # snapshot copy of the environment.
        osname = os.name
        src = __import__(osname)
        dst = self.copy_only(src, self.ok_posix_names)
        dst.environ = e = {}
        for key, value in os.environ.items():
            e[key] = value

    def make_sys(self):
        # Fake sys module: whitelisted names plus restricted replacements.
        m = self.copy_only(sys, self.ok_sys_names)
        m.modules = self.modules
        m.argv = ['RESTRICTED']
        m.path = map(None, self.ok_path)
        m.exc_info = self.r_exc_info
        m = self.modules['sys']
        l = self.modules.keys() + list(self.ok_builtin_modules)
        l.sort()
        m.builtin_module_names = tuple(l)

    # The copy_* methods copy existing modules with some changes

    def copy_except(self, src, exceptions):
        # Copy every attribute of src, then delete the named exceptions.
        dst = self.copy_none(src)
        for name in dir(src):
            setattr(dst, name, getattr(src, name))
        for name in exceptions:
            try:
                delattr(dst, name)
            except AttributeError:
                pass
        return dst

    def copy_only(self, src, names):
        # Copy only the explicitly listed attributes of src.
        dst = self.copy_none(src)
        for name in names:
            try:
                value = getattr(src, name)
            except AttributeError:
                continue
            setattr(dst, name, value)
        return dst

    def copy_none(self, src):
        # Fresh empty module with the same name and docstring.
        m = self.add_module(src.__name__)
        m.__doc__ = src.__doc__
        return m

    # Add a module -- return an existing module or create one

    def add_module(self, mname):
        m = self.modules.get(mname)
        if m is None:
            self.modules[mname] = m = self.hooks.new_module(mname)
        # Every restricted module sees the restricted builtins.
        m.__builtins__ = self.modules['__builtin__']
        return m

    # The r* methods are public interfaces

    def r_exec(self, code):
        """Execute code within a restricted environment.

        The code parameter must either be a string containing one or more
        lines of Python code, or a compiled code object, which will be
        executed in the restricted environment's __main__ module.

        """
        m = self.add_module('__main__')
        exec code in m.__dict__

    def r_eval(self, code):
        """Evaluate code within a restricted environment.

        The code parameter must either be a string containing a Python
        expression, or a compiled code object, which will be evaluated in
        the restricted environment's __main__ module.  The value of the
        expression or code object will be returned.

        """
        m = self.add_module('__main__')
        return eval(code, m.__dict__)

    def r_execfile(self, file):
        """Execute the Python code in the file in the restricted
        environment's __main__ module.

        """
        m = self.add_module('__main__')
        execfile(file, m.__dict__)

    def r_import(self, mname, globals={}, locals={}, fromlist=[]):
        """Import a module, raising an ImportError exception if the module
        is considered unsafe.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        """
        return self.importer.import_module(mname, globals, locals, fromlist)

    def r_reload(self, m):
        """Reload the module object, re-parsing and re-initializing it.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        """
        return self.importer.reload(m)

    def r_unload(self, m):
        """Unload the module.

        Removes it from the restricted environment's sys.modules dictionary.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        """
        return self.importer.unload(m)

    # The s_* methods are similar but also swap std{in,out,err}

    def make_delegate_files(self):
        # Delegates go on the REAL sys so unrestricted code keeps writing
        # to whatever the restricted code currently sees, and wrappers give
        # restricted code safe handles on the real streams.
        s = self.modules['sys']
        self.delegate_stdin = FileDelegate(s, 'stdin')
        self.delegate_stdout = FileDelegate(s, 'stdout')
        self.delegate_stderr = FileDelegate(s, 'stderr')
        self.restricted_stdin = FileWrapper(sys.stdin)
        self.restricted_stdout = FileWrapper(sys.stdout)
        self.restricted_stderr = FileWrapper(sys.stderr)

    def set_files(self):
        # Install the swapped std streams (saving the real ones first).
        if not hasattr(self, 'save_stdin'):
            self.save_files()
        if not hasattr(self, 'delegate_stdin'):
            self.make_delegate_files()
        s = self.modules['sys']
        s.stdin = self.restricted_stdin
        s.stdout = self.restricted_stdout
        s.stderr = self.restricted_stderr
        sys.stdin = self.delegate_stdin
        sys.stdout = self.delegate_stdout
        sys.stderr = self.delegate_stderr

    def reset_files(self):
        # Undo set_files(), remembering the streams the restricted code
        # last had so they can be re-installed later.
        self.restore_files()
        s = self.modules['sys']
        self.restricted_stdin = s.stdin
        self.restricted_stdout = s.stdout
        self.restricted_stderr = s.stderr

    def save_files(self):
        # Snapshot the real std streams.
        self.save_stdin = sys.stdin
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr

    def restore_files(self):
        # Put the snapshotted real std streams back.
        sys.stdin = self.save_stdin
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr

    def s_apply(self, func, args=(), kw={}):
        # Run func with the swapped std streams installed, restoring the
        # real ones afterwards even if func raises.
        self.save_files()
        try:
            self.set_files()
            r = func(*args, **kw)
        finally:
            self.restore_files()
        return r

    def s_exec(self, *args):
        """Execute code within a restricted environment.

        Similar to the r_exec() method, but the code will be granted access
        to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.

        The code parameter must either be a string containing one or more
        lines of Python code, or a compiled code object, which will be
        executed in the restricted environment's __main__ module.

        """
        return self.s_apply(self.r_exec, args)

    def s_eval(self, *args):
        """Evaluate code within a restricted environment.

        Similar to the r_eval() method, but the code will be granted access
        to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.

        The code parameter must either be a string containing a Python
        expression, or a compiled code object, which will be evaluated in
        the restricted environment's __main__ module.  The value of the
        expression or code object will be returned.

        """
        return self.s_apply(self.r_eval, args)

    def s_execfile(self, *args):
        """Execute the Python code in the file in the restricted
        environment's __main__ module.

        Similar to the r_execfile() method, but the code will be granted
        access to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.

        """
        return self.s_apply(self.r_execfile, args)

    def s_import(self, *args):
        """Import a module, raising an ImportError exception if the module
        is considered unsafe.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        Similar to the r_import() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.

        """
        return self.s_apply(self.r_import, args)

    def s_reload(self, *args):
        """Reload the module object, re-parsing and re-initializing it.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        Similar to the r_reload() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.

        """
        return self.s_apply(self.r_reload, args)

    def s_unload(self, *args):
        """Unload the module.

        Removes it from the restricted environment's sys.modules dictionary.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        Similar to the r_unload() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.

        """
        return self.s_apply(self.r_unload, args)

    # Restricted open(...)

    def r_open(self, file, mode='r', buf=-1):
        """Method called when open() is called in the restricted environment.

        The arguments are identical to those of the open() function, and a
        file object (or a class instance compatible with file objects)
        should be returned.  RExec's default behaviour is allow opening
        any file for reading, but forbidding any attempt to write a file.

        This method is implicitly called by code executing in the
        restricted environment.  Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.

        """
        mode = str(mode)
        if mode not in ('r', 'rb'):
            raise IOError, "can't open files for writing in restricted mode"
        return open(file, mode, buf)

    # Restricted version of sys.exc_info()

    def r_exc_info(self):
        # Hide the traceback: it holds frame objects whose globals would
        # let restricted code escape the sandbox.
        ty, va, tr = sys.exc_info()
        tr = None
        return ty, va, tr
def test():
    """Interactive exerciser for RExec (Python 2).

    Usage: rexec.py [-v] [-t module]... [file | -] [args...]
      -v         increase verbosity (repeatable)
      -t module  add a trusted builtin module
    Runs the given script (or an interactive console on a tty) inside a
    restricted environment; returns a process exit status.
    """
    import getopt, traceback
    opts, args = getopt.getopt(sys.argv[1:], 'vt:')
    verbose = 0
    trusted = []
    for o, a in opts:
        if o == '-v':
            verbose = verbose+1
        if o == '-t':
            trusted.append(a)
    r = RExec(verbose=verbose)
    if trusted:
        r.ok_builtin_modules = r.ok_builtin_modules + tuple(trusted)
    if args:
        # Make the restricted sys.argv / sys.path mirror the target script.
        r.modules['sys'].argv = args
        r.modules['sys'].path.insert(0, os.path.dirname(args[0]))
    else:
        r.modules['sys'].path.insert(0, "")
    fp = sys.stdin
    if args and args[0] != '-':
        try:
            fp = open(args[0])
        except IOError, msg:
            print "%s: can't open file %r" % (sys.argv[0], args[0])
            return 1
    if fp.isatty():
        # Interactive: run a REPL whose code objects execute restricted.
        try:
            import readline
        except ImportError:
            pass
        import code
        class RestrictedConsole(code.InteractiveConsole):
            def runcode(self, co):
                self.locals['__builtins__'] = r.modules['__builtin__']
                r.s_apply(code.InteractiveConsole.runcode, (self, co))
        try:
            RestrictedConsole(r.modules['__main__'].__dict__).interact()
        except SystemExit, n:
            return n
    else:
        # Batch mode: compile the whole input and execute it restricted.
        text = fp.read()
        fp.close()
        c = compile(text, fp.name, 'exec')
        try:
            r.s_exec(c)
        except SystemExit, n:
            return n
        except:
            traceback.print_exc()
            return 1
# Run the exerciser when invoked as a script.
if __name__ == '__main__':
    sys.exit(test())
| gpl-3.0 |
geodashio/geodash-framework-django | geodash/static/geodash/lib/bootstrap/3.3.5/test-infra/s3_cache.py | 2166 | 5734 | #!/usr/bin/env python2.7
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024
@contextmanager
def timer():
    """Context manager that reports, in whole seconds, how long its body took."""
    started = datetime.utcnow()
    yield
    took = (datetime.utcnow() - started).total_seconds()
    print("\tDone. Took", int(took), "second(s).")
@contextmanager
def todo_file(writeback=True):
    """Yield the pending-upload dict loaded from UPLOAD_TODO_FILE.

    A missing or unparseable file yields an empty dict.  When *writeback*
    is true and the body completes without raising, the (possibly
    modified) dict is saved back; save failures are printed, not raised.
    """
    try:
        with open(UPLOAD_TODO_FILE, 'rt') as json_file:
            todo = load(json_file)
    except (IOError, OSError, ValueError):
        # No TODO file yet, or corrupt JSON: start from a clean slate.
        todo = {}
    yield todo
    if not writeback:
        return
    try:
        with open(UPLOAD_TODO_FILE, 'wt') as json_file:
            save(todo, json_file)
    except (OSError, IOError) as save_err:
        print("Error saving {}:".format(UPLOAD_TODO_FILE), save_err)
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
    """Remove *filename*, silently ignoring a missing or undeletable file."""
    try:
        _delete_file(filename)
    except (OSError, IOError):
        # Best-effort cleanup only; absence is fine here.
        pass
def mark_needs_uploading(cache_name):
    """Record in the TODO file that *cache_name* must be (re)uploaded."""
    with todo_file() as todo:
        todo[cache_name] = True
def mark_uploaded(cache_name):
    """Clear *cache_name*'s pending-upload flag, if any."""
    with todo_file() as todo:
        todo.pop(cache_name, None)
def need_to_upload(cache_name):
    """Return True when *cache_name* is flagged as needing an upload."""
    with todo_file(writeback=False) as todo:
        return todo.get(cache_name, False)
def _tarball_size(directory):
    """Return *directory*'s tarball size as a human-readable "<N> MiB" string."""
    # The quotient is mebibytes (st_size // BYTES_PER_MB); the original
    # misleadingly named this local `kib`.
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    """Pack *directory* into its tarball (see _tarball_filename_for), timed."""
    print("Creating tarball of {}...".format(directory))
    tar_args = ['tar', '-czf', _tarball_filename_for(directory),
                '-C', dirname(directory), basename(directory)]
    with timer():
        run(tar_args)
def _extract_tarball(directory):
    """Unpack *directory*'s tarball next to it (see _tarball_filename_for), timed."""
    print("Extracting tarball of {}...".format(directory))
    tar_args = ['tar', '-xzf', _tarball_filename_for(directory),
                '-C', dirname(directory)]
    with timer():
        run(tar_args)
def download(directory):
    """Fetch the cache tarball from S3 and unpack it over *directory*.

    NOTE(review): relies on the module-level globals `cache_name` and `key`
    assigned in the __main__ block -- confirm before reusing as a library.
    """
    mark_uploaded(cache_name)  # reset
    try:
        print("Downloading {} tarball from S3...".format(cache_name))
        with timer():
            key.get_contents_to_filename(_tarball_filename_for(directory))
    except S3ResponseError as err:
        # Flag the entry so the next run repopulates the cache.
        # (`err` is intentionally unused; SystemExit carries the summary.)
        mark_needs_uploading(cache_name)
        raise SystemExit("Cached {} download failed!".format(cache_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(cache_name))
def upload(directory):
    """Pack *directory* into a tarball and push it to S3.

    NOTE(review): relies on the module-level globals `cache_name` and `key`
    assigned in the __main__ block.
    """
    _create_tarball(directory)
    print("Uploading {} tarball to S3... ({})".format(cache_name, _tarball_size(directory)))
    with timer():
        key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(cache_name))
    mark_uploaded(cache_name)
if __name__ == '__main__':
    # Uses environment variables:
    #   AWS_ACCESS_KEY_ID     -- AWS Access Key ID
    #   AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 2:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <cache name>")
    mode, cache_name = argv
    # Operate relative to this script's directory so config paths resolve.
    script_dir = dirname(realpath(__file__))
    chdir(script_dir)
    try:
        with open(CONFIG_FILE, 'rt') as config_file:
            config = load(config_file)
    except (IOError, OSError, ValueError) as config_err:
        print(config_err)
        raise SystemExit("Error when trying to load config from JSON file!")
    try:
        cache_info = config[cache_name]
        key_file = expandvars(cache_info["key"])
        fallback_cmd = cache_info["generate"]
        directory = expandvars(cache_info["cache"])
    except (TypeError, KeyError) as load_err:
        print(load_err)
        raise SystemExit("Config for cache named {!r} is missing or malformed!".format(cache_name))
    try:
        try:
            BUCKET_NAME = environ['TWBS_S3_BUCKET']
        except KeyError:
            raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
        conn = S3Connection()
        bucket = conn.lookup(BUCKET_NAME)
        if bucket is None:
            raise SystemExit("Could not access bucket!")
        # The S3 object key is the hash of the configured key *file*, so the
        # cache entry invalidates automatically whenever that file changes.
        key_file_hash = _sha256_of_file(key_file)
        key = Key(bucket, key_file_hash)
        key.storage_class = 'REDUCED_REDUNDANCY'
        if mode == 'download':
            download(directory)
        elif mode == 'upload':
            if need_to_upload(cache_name):
                upload(directory)
            else:
                print("No need to upload anything.")
        else:
            raise SystemExit("Unrecognized mode {!r}".format(mode))
    except BaseException as exc:
        # On any download failure (including SystemExit above), fall back to
        # regenerating the cache contents locally; upload errors propagate.
        if mode != 'download':
            raise
        print("Error!:", exc)
        print("Unable to download from cache.")
        print("Running fallback command to generate cache directory {!r}: {}".format(directory, fallback_cmd))
        with timer():
            run(fallback_cmd, shell=True)
| bsd-3-clause |
kingoflolz/hearthbreaker | hearthbreaker/serialization/move.py | 8 | 6665 | import hearthbreaker.game_objects
__author__ = 'dyule'
class Move:
    """Base class for replayable game actions.

    Subclasses implement play() plus JSON (de)serialization; from_json()
    dispatches on the serialized "name" field to rebuild the right subclass.
    """

    def __init__(self):
        super().__init__()
        # Random numbers consumed while this move was originally played,
        # recorded so replays are deterministic.
        self.random_numbers = []

    def play(self, game):
        pass

    @staticmethod
    def from_json(name, random=(), **json):
        """Rebuild the concrete Move subclass encoded by *name* from JSON.

        *random* defaults to an immutable tuple; the original used a
        mutable default list (a classic Python pitfall), though it was
        only iterated, never mutated.
        """
        # Dispatch table instead of the original if/elif chain.
        move_classes = {
            'play': PlayMove,
            'attack': AttackMove,
            'power': PowerMove,
            'end': TurnEndMove,
            'start': TurnStartMove,
            'concede': ConcedeMove,
        }
        cls = move_classes.get(name)
        if cls is None:
            # Fail loudly; the original crashed on cls.__new__(None) with an
            # unhelpful AttributeError.
            raise ValueError("Unknown move name: {!r}".format(name))
        obj = cls.__new__(cls)
        cls.__from_json__(obj, **json)
        obj.random_numbers = []
        for num in random:
            if isinstance(num, dict):
                # NOTE(review): relies on hearthbreaker.proxies being reachable
                # as an attribute of the imported hearthbreaker package -- confirm.
                obj.random_numbers.append(hearthbreaker.proxies.ProxyCharacter.from_json(**num))
            else:
                obj.random_numbers.append(num)
        return obj

    def __to_json__(self):
        pass

    def __update_json__(self, json):
        # Only persist the recorded random stream when it is non-empty.
        if len(self.random_numbers) > 0:
            json.update({
                'random': self.random_numbers
            })
        return json
class PlayMove(Move):
    """Move that plays a card from hand, optionally at a board index and/or
    on a target; serialized under name "play" (or rendered as summon(...))."""

    def __init__(self, card, index=-1, target=None):
        super().__init__()
        self.card = card
        self.index = index
        if target is not None:
            self.target = hearthbreaker.proxies.ProxyCharacter(target)
        else:
            self.target = None

    def play(self, game):
        # Stash the resolved target/index on the agent; the card-play
        # machinery reads them during game.play_card().
        if self.target is not None:
            game.current_player.agent.next_target = self.target.resolve(game)
        game.current_player.agent.next_index = self.index
        game.play_card(self.card.resolve(game))
        # NOTE(review): this resets `nextIndex`, but the attribute assigned
        # above is `next_index` -- looks like a typo leaving next_index
        # stale; confirm against the agent implementation before changing.
        game.current_player.agent.nextIndex = -1

    def to_output_string(self):
        # summon(...) form when an explicit board index was recorded,
        # play(...) otherwise.
        if self.index > -1:
            if self.target is not None:
                return 'summon({0},{1},{2})'.format(self.card.to_output(), self.index, self.target.to_output())
            return 'summon({0},{1})'.format(self.card.to_output(), self.index)
        else:
            if self.target is not None:
                return 'play({0},{1})'.format(self.card.to_output(), self.target.to_output())
            return 'play({0})'.format(self.card.to_output())

    def __to_json__(self):
        # Emit only the fields that carry information for this move.
        if self.target is not None:
            if self.index > -1:
                json = {
                    'name': 'play',
                    'card': self.card,
                    'index': self.index,
                    'target': self.target,
                }
            else:
                json = {
                    'name': 'play',
                    'card': self.card,
                    'target': self.target,
                }
        else:
            if self.index > -1:
                json = {
                    'name': 'play',
                    'card': self.card,
                    'index': self.index,
                }
            else:
                json = {
                    'name': 'play',
                    'card': self.card,
                }
        return self.__update_json__(json)

    def __from_json__(self, card, index=-1, target=None):
        self.card = hearthbreaker.proxies.ProxyCard.from_json(**card)
        self.index = index
        if target:
            self.target = hearthbreaker.proxies.ProxyCharacter.from_json(**target)
        else:
            self.target = None
class AttackMove(Move):
    """Move in which one character attacks another.

    Both endpoints are stored as ProxyCharacter so they can be re-resolved
    against a replayed game.
    """

    def __init__(self, character, target):
        super().__init__()
        self.character = hearthbreaker.proxies.ProxyCharacter(character)
        self.target = hearthbreaker.proxies.ProxyCharacter(target)

    def to_output_string(self):
        return 'attack({0},{1})'.format(self.character.to_output(), self.target.to_output())

    def play(self, game):
        # Expose the resolved defender to the agent for the duration of the
        # attack, then clear it again.
        defender = self.target.resolve(game)
        game.current_player.agent.next_target = defender
        self.character.resolve(game).attack()
        game.current_player.agent.next_target = None

    def __to_json__(self):
        json = {
            'name': 'attack',
            'character': self.character,
            'target': self.target,
        }
        return self.__update_json__(json)

    def __from_json__(self, character, target):
        self.character = hearthbreaker.proxies.ProxyCharacter.from_json(**character)
        self.target = hearthbreaker.proxies.ProxyCharacter.from_json(**target)
class PowerMove(Move):
    """Move that uses the hero power, optionally on a target."""

    def __init__(self, target=None):
        super().__init__()
        # Wrap the target in a proxy so it can be re-resolved on replay.
        # (The original assigned self.target twice; once is enough.)
        self.target = hearthbreaker.proxies.ProxyCharacter(target) if target is not None else None

    def to_output_string(self):
        if self.target is None:
            return 'power()'
        return 'power({0})'.format(self.target.to_output())

    def play(self, game):
        if self.target is not None:
            game.current_player.agent.next_target = self.target.resolve(game)
        game.current_player.hero.power.use()
        game.current_player.agent.next_target = None

    def __to_json__(self):
        json = {'name': 'power'}
        if self.target:
            json['target'] = self.target
        return self.__update_json__(json)

    def __from_json__(self, target=None):
        self.target = hearthbreaker.proxies.ProxyCharacter.from_json(**target) if target is not None else None
class TurnEndMove(Move):
    """Marker move recording the end of a turn; playing it is a no-op."""

    def __init__(self):
        super().__init__()

    def to_output_string(self):
        return 'end()'

    def play(self, game):
        pass

    def __to_json__(self):
        return self.__update_json__({'name': 'end'})

    def __from_json__(self):
        pass
class TurnStartMove(Move):
    """Marker move recording the start of a turn; playing it is a no-op."""

    def __init__(self):
        super().__init__()

    def to_output_string(self):
        return 'start()'

    def play(self, game):
        pass

    def __to_json__(self):
        return self.__update_json__({'name': 'start'})

    def __from_json__(self):
        pass
class ConcedeMove(Move):
    """Move representing the current player conceding the game."""

    def __init__(self):
        super().__init__()

    def to_output_string(self):
        return "concede()"

    def play(self, game):
        # Kill the conceding player's hero, then flush its death triggers.
        game.current_player.hero.die(None)
        game.current_player.hero.activate_delayed()

    def __to_json__(self):
        return self.__update_json__({'name': 'concede'})

    def __from_json__(self):
        pass
| mit |
Resellers/bootstrap | test-infra/s3_cache.py | 2166 | 5734 | #!/usr/bin/env python2.7
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024
@contextmanager
def timer():
start = datetime.utcnow()
yield
end = datetime.utcnow()
elapsed = end - start
print("\tDone. Took", int(elapsed.total_seconds()), "second(s).")
@contextmanager
def todo_file(writeback=True):
try:
with open(UPLOAD_TODO_FILE, 'rt') as json_file:
todo = load(json_file)
except (IOError, OSError, ValueError):
todo = {}
yield todo
if writeback:
try:
with open(UPLOAD_TODO_FILE, 'wt') as json_file:
save(todo, json_file)
except (OSError, IOError) as save_err:
print("Error saving {}:".format(UPLOAD_TODO_FILE), save_err)
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def mark_needs_uploading(cache_name):
with todo_file() as todo:
todo[cache_name] = True
def mark_uploaded(cache_name):
with todo_file() as todo:
todo.pop(cache_name, None)
def need_to_upload(cache_name):
with todo_file(writeback=False) as todo:
return todo.get(cache_name, False)
def _tarball_size(directory):
    """Return *directory*'s tarball size as a human-readable "<N> MiB" string."""
    # The quotient is mebibytes (st_size // BYTES_PER_MB); the original
    # misleadingly named this local `kib`.
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
with timer():
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
with timer():
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
mark_uploaded(cache_name) # reset
try:
print("Downloading {} tarball from S3...".format(cache_name))
with timer():
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
mark_needs_uploading(cache_name)
raise SystemExit("Cached {} download failed!".format(cache_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(cache_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(cache_name, _tarball_size(directory)))
with timer():
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(cache_name))
mark_uploaded(cache_name)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 2:
raise SystemExit("USAGE: s3_cache.py <download | upload> <cache name>")
mode, cache_name = argv
script_dir = dirname(realpath(__file__))
chdir(script_dir)
try:
with open(CONFIG_FILE, 'rt') as config_file:
config = load(config_file)
except (IOError, OSError, ValueError) as config_err:
print(config_err)
raise SystemExit("Error when trying to load config from JSON file!")
try:
cache_info = config[cache_name]
key_file = expandvars(cache_info["key"])
fallback_cmd = cache_info["generate"]
directory = expandvars(cache_info["cache"])
except (TypeError, KeyError) as load_err:
print(load_err)
raise SystemExit("Config for cache named {!r} is missing or malformed!".format(cache_name))
try:
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME)
if bucket is None:
raise SystemExit("Could not access bucket!")
key_file_hash = _sha256_of_file(key_file)
key = Key(bucket, key_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if need_to_upload(cache_name):
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
except BaseException as exc:
if mode != 'download':
raise
print("Error!:", exc)
print("Unable to download from cache.")
print("Running fallback command to generate cache directory {!r}: {}".format(directory, fallback_cmd))
with timer():
run(fallback_cmd, shell=True)
| mit |
zhoulingjun/django | tests/migrations/test_commands.py | 38 | 49416 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import importlib
import os
from django.apps import apps
from django.core.management import CommandError, call_command
from django.db import DatabaseError, connection, models
from django.db.migrations.recorder import MigrationRecorder
from django.test import ignore_warnings, mock, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from .models import UnicodeModel, UnserializableModel
from .test_base import MigrationTestBase
class MigrateTests(MigrationTestBase):
    """
    Tests running the migrate command.
    """

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_migrate(self):
        """
        Tests basic usage of the migrate command.
        """
        # Make sure no tables are created
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")
        # Run the migrations to 0001 only
        call_command("migrate", "migrations", "0001", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")
        # Run migrations all the way
        call_command("migrate", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableExists("migrations_book")
        # Unmigrate everything
        call_command("migrate", "migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_initial_false"})
    def test_migrate_initial_false(self):
        """
        `Migration.initial = False` skips fake-initial detection.
        """
        # Make sure no tables are created
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Run the migrations to 0001 only
        call_command("migrate", "migrations", "0001", verbosity=0)
        # Fake rollback
        call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
        # Make sure fake-initial detection does not run
        with self.assertRaises(DatabaseError):
            call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0)
        call_command("migrate", "migrations", "0001", fake=True, verbosity=0)
        # Real rollback
        call_command("migrate", "migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_migrate_fake_initial(self):
        """
        #24184 - Tests that --fake-initial only works if all tables created in
        the initial migration of an app exists
        """
        # Make sure no tables are created
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Run the migrations to 0001 only
        call_command("migrate", "migrations", "0001", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Fake a roll-back
        call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
        # Make sure the tables still exist
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Try to run initial migration
        with self.assertRaises(DatabaseError):
            call_command("migrate", "migrations", "0001", verbosity=0)
        # Run initial migration with an explicit --fake-initial
        out = six.StringIO()
        with mock.patch('django.core.management.color.supports_color', lambda *args: False):
            call_command("migrate", "migrations", "0001", fake_initial=True, stdout=out, verbosity=1)
        self.assertIn(
            "migrations.0001_initial... faked",
            out.getvalue().lower()
        )
        # Run migrations all the way
        call_command("migrate", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableExists("migrations_book")
        # Fake a roll-back
        call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
        # Make sure the tables still exist
        self.assertTableExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableExists("migrations_book")
        # Try to run initial migration
        with self.assertRaises(DatabaseError):
            call_command("migrate", "migrations", verbosity=0)
        # Run initial migration with an explicit --fake-initial
        with self.assertRaises(DatabaseError):
            # Fails because "migrations_tribble" does not exist but needs to in
            # order to make --fake-initial work.
            call_command("migrate", "migrations", fake_initial=True, verbosity=0)
        # Fake a apply
        call_command("migrate", "migrations", fake=True, verbosity=0)
        # Unmigrate everything
        call_command("migrate", "migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        self.assertTableNotExists("migrations_book")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_fake_split_initial"})
    def test_migrate_fake_split_initial(self):
        """
        Split initial migrations can be faked with --fake-initial.
        """
        call_command("migrate", "migrations", "0002", verbosity=0)
        call_command("migrate", "migrations", "zero", fake=True, verbosity=0)
        out = six.StringIO()
        with mock.patch('django.core.management.color.supports_color', lambda *args: False):
            call_command("migrate", "migrations", "0002", fake_initial=True, stdout=out, verbosity=1)
        value = out.getvalue().lower()
        self.assertIn("migrations.0001_initial... faked", value)
        self.assertIn("migrations.0002_second... faked", value)
        # Fake an apply
        call_command("migrate", "migrations", fake=True, verbosity=0)
        # Unmigrate everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"})
    def test_migrate_conflict_exit(self):
        """
        Makes sure that migrate exits if it detects a conflict.
        """
        with self.assertRaisesMessage(CommandError, "Conflicting migrations detected"):
            call_command("migrate", "migrations")

    @ignore_warnings(category=RemovedInDjango110Warning)
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_migrate_list(self):
        """
        Tests --list output of migrate command
        """
        out = six.StringIO()
        # Colored output: the app label is wrapped in bold ANSI escapes.
        with mock.patch('django.core.management.color.supports_color', lambda *args: True):
            call_command("migrate", list=True, stdout=out, verbosity=0, no_color=False)
        self.assertEqual(
            '\x1b[1mmigrations\n\x1b[0m'
            ' [ ] 0001_initial\n'
            ' [ ] 0002_second\n',
            out.getvalue().lower()
        )
        call_command("migrate", "migrations", "0001", verbosity=0)
        out = six.StringIO()
        # Giving the explicit app_label tests for selective `show_migration_list` in the command
        call_command("migrate", "migrations", list=True, stdout=out, verbosity=0, no_color=True)
        self.assertEqual(
            'migrations\n'
            ' [x] 0001_initial\n'
            ' [ ] 0002_second\n',
            out.getvalue().lower()
        )
        # Cleanup by unmigrating everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_showmigrations_list(self):
        """
        Tests --list output of showmigrations command
        """
        out = six.StringIO()
        with mock.patch('django.core.management.color.supports_color', lambda *args: True):
            call_command("showmigrations", format='list', stdout=out, verbosity=0, no_color=False)
        self.assertEqual(
            '\x1b[1mmigrations\n\x1b[0m'
            ' [ ] 0001_initial\n'
            ' [ ] 0002_second\n',
            out.getvalue().lower()
        )
        call_command("migrate", "migrations", "0001", verbosity=0)
        out = six.StringIO()
        # Giving the explicit app_label tests for selective `show_list` in the command
        call_command("showmigrations", "migrations", format='list', stdout=out, verbosity=0, no_color=True)
        self.assertEqual(
            'migrations\n'
            ' [x] 0001_initial\n'
            ' [ ] 0002_second\n',
            out.getvalue().lower()
        )
        # Cleanup by unmigrating everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
    def test_showmigrations_plan(self):
        """
        Tests --plan output of showmigrations command
        """
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertIn(
            "[ ]  migrations.0001_initial\n"
            "[ ]  migrations.0003_third\n"
            "[ ]  migrations.0002_second",
            out.getvalue().lower()
        )
        out = six.StringIO()
        # Verbosity 2 also shows each migration's dependencies.
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertIn(
            "[ ]  migrations.0001_initial\n"
            "[ ]  migrations.0003_third ... (migrations.0001_initial)\n"
            "[ ]  migrations.0002_second ... (migrations.0001_initial)",
            out.getvalue().lower()
        )
        call_command("migrate", "migrations", "0003", verbosity=0)
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertIn(
            "[x]  migrations.0001_initial\n"
            "[x]  migrations.0003_third\n"
            "[ ]  migrations.0002_second",
            out.getvalue().lower()
        )
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertIn(
            "[x]  migrations.0001_initial\n"
            "[x]  migrations.0003_third ... (migrations.0001_initial)\n"
            "[ ]  migrations.0002_second ... (migrations.0001_initial)",
            out.getvalue().lower()
        )
        # Cleanup by unmigrating everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"})
    def test_showmigrations_plan_no_migrations(self):
        """
        Tests --plan output of showmigrations command without migrations
        """
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertEqual("", out.getvalue().lower())
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertEqual("", out.getvalue().lower())

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
    def test_showmigrations_plan_squashed(self):
        """
        Tests --plan output of showmigrations command with squashed migrations.
        """
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertEqual(
            "[ ]  migrations.1_auto\n"
            "[ ]  migrations.2_auto\n"
            "[ ]  migrations.3_squashed_5\n"
            "[ ]  migrations.6_auto\n"
            "[ ]  migrations.7_auto\n",
            out.getvalue().lower()
        )
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertEqual(
            "[ ]  migrations.1_auto\n"
            "[ ]  migrations.2_auto ... (migrations.1_auto)\n"
            "[ ]  migrations.3_squashed_5 ... (migrations.2_auto)\n"
            "[ ]  migrations.6_auto ... (migrations.3_squashed_5)\n"
            "[ ]  migrations.7_auto ... (migrations.6_auto)\n",
            out.getvalue().lower()
        )
        call_command("migrate", "migrations", "3_squashed_5", verbosity=0)
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out)
        self.assertEqual(
            "[x]  migrations.1_auto\n"
            "[x]  migrations.2_auto\n"
            "[x]  migrations.3_squashed_5\n"
            "[ ]  migrations.6_auto\n"
            "[ ]  migrations.7_auto\n",
            out.getvalue().lower()
        )
        out = six.StringIO()
        call_command("showmigrations", format='plan', stdout=out, verbosity=2)
        self.assertEqual(
            "[x]  migrations.1_auto\n"
            "[x]  migrations.2_auto ... (migrations.1_auto)\n"
            "[x]  migrations.3_squashed_5 ... (migrations.2_auto)\n"
            "[ ]  migrations.6_auto ... (migrations.3_squashed_5)\n"
            "[ ]  migrations.7_auto ... (migrations.6_auto)\n",
            out.getvalue().lower()
        )

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_sqlmigrate_forwards(self):
        """
        Makes sure that sqlmigrate does something.
        """
        out = six.StringIO()
        call_command("sqlmigrate", "migrations", "0001", stdout=out)
        output = out.getvalue().lower()
        # The forward SQL must contain, in order: BEGIN, the author model,
        # its CREATE TABLE, the tribble model, the unique_together change, COMMIT.
        index_tx_start = output.find(connection.ops.start_transaction_sql().lower())
        index_op_desc_author = output.find('-- create model author')
        index_create_table = output.find('create table')
        index_op_desc_tribble = output.find('-- create model tribble')
        index_op_desc_unique_together = output.find('-- alter unique_together')
        index_tx_end = output.find(connection.ops.end_transaction_sql().lower())
        self.assertGreater(index_tx_start, -1, "Transaction start not found")
        self.assertGreater(index_op_desc_author, index_tx_start,
            "Operation description (author) not found or found before transaction start")
        self.assertGreater(index_create_table, index_op_desc_author,
            "CREATE TABLE not found or found before operation description (author)")
        self.assertGreater(index_op_desc_tribble, index_create_table,
            "Operation description (tribble) not found or found before CREATE TABLE (author)")
        self.assertGreater(index_op_desc_unique_together, index_op_desc_tribble,
            "Operation description (unique_together) not found or found before operation description (tribble)")
        self.assertGreater(index_tx_end, index_op_desc_unique_together,
            "Transaction end not found or found before operation description (unique_together)")

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_sqlmigrate_backwards(self):
        """
        Makes sure that sqlmigrate does something.
        """
        # Cannot generate the reverse SQL unless we've applied the migration.
        call_command("migrate", "migrations", verbosity=0)
        out = six.StringIO()
        call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True)
        output = out.getvalue().lower()
        # Reverse SQL should undo operations in reverse order, ending in DROP TABLE.
        index_tx_start = output.find(connection.ops.start_transaction_sql().lower())
        index_op_desc_unique_together = output.find('-- alter unique_together')
        index_op_desc_tribble = output.find('-- create model tribble')
        index_op_desc_author = output.find('-- create model author')
        index_drop_table = output.rfind('drop table')
        index_tx_end = output.find(connection.ops.end_transaction_sql().lower())
        self.assertGreater(index_tx_start, -1, "Transaction start not found")
        self.assertGreater(index_op_desc_unique_together, index_tx_start,
            "Operation description (unique_together) not found or found before transaction start")
        self.assertGreater(index_op_desc_tribble, index_op_desc_unique_together,
            "Operation description (tribble) not found or found before operation description (unique_together)")
        self.assertGreater(index_op_desc_author, index_op_desc_tribble,
            "Operation description (author) not found or found before operation description (tribble)")
        self.assertGreater(index_drop_table, index_op_desc_author,
            "DROP TABLE not found or found before operation description (author)")
        self.assertGreater(index_tx_end, index_op_desc_unique_together,
            "Transaction end not found or found before DROP TABLE")
        # Cleanup by unmigrating everything
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(
        INSTALLED_APPS=[
            "migrations.migrations_test_apps.migrated_app",
            "migrations.migrations_test_apps.migrated_unapplied_app",
            "migrations.migrations_test_apps.unmigrated_app"])
    def test_regression_22823_unmigrated_fk_to_migrated_model(self):
        """
        https://code.djangoproject.com/ticket/22823
        Assuming you have 3 apps, `A`, `B`, and `C`, such that:
        * `A` has migrations
        * `B` has a migration we want to apply
        * `C` has no migrations, but has an FK to `A`
        When we try to migrate "B", an exception occurs because the
        "B" was not included in the ProjectState that is used to detect
        soft-applied migrations.
        """
        call_command("migrate", "migrated_unapplied_app", stdout=six.StringIO())

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_migrate_record_replaced(self):
        """
        Running a single squashed migration should record all of the original
        replaced migrations as run.
        """
        recorder = MigrationRecorder(connection)
        out = six.StringIO()
        call_command("migrate", "migrations", verbosity=0)
        call_command("showmigrations", "migrations", stdout=out, no_color=True)
        self.assertEqual(
            'migrations\n'
            ' [x] 0001_squashed_0002 (2 squashed migrations)\n',
            out.getvalue().lower()
        )
        applied_migrations = recorder.applied_migrations()
        self.assertIn(("migrations", "0001_initial"), applied_migrations)
        self.assertIn(("migrations", "0002_second"), applied_migrations)
        self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations)
        # Rollback changes
        call_command("migrate", "migrations", "zero", verbosity=0)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_migrate_record_squashed(self):
        """
        Running migrate for a squashed migration should record as run
        if all of the replaced migrations have been run (#25231).
        """
        recorder = MigrationRecorder(connection)
        recorder.record_applied("migrations", "0001_initial")
        recorder.record_applied("migrations", "0002_second")
        out = six.StringIO()
        call_command("migrate", "migrations", verbosity=0)
        call_command("showmigrations", "migrations", stdout=out, no_color=True)
        self.assertEqual(
            'migrations\n'
            ' [x] 0001_squashed_0002 (2 squashed migrations)\n',
            out.getvalue().lower()
        )
        self.assertIn(
            ("migrations", "0001_squashed_0002"),
            recorder.applied_migrations()
        )
        # No changes were actually applied so there is nothing to rollback
class MakeMigrationsTests(MigrationTestBase):
"""
Tests running the makemigrations command.
"""
def setUp(self):
super(MigrationTestBase, self).setUp()
self._old_models = apps.app_configs['migrations'].models.copy()
def tearDown(self):
apps.app_configs['migrations'].models = self._old_models
apps.all_models['migrations'] = self._old_models
apps.clear_cache()
super(MigrationTestBase, self).tearDown()
def test_files_content(self):
self.assertTableNotExists("migrations_unicodemodel")
apps.register_model('migrations', UnicodeModel)
with self.temporary_migration_module() as migration_dir:
call_command("makemigrations", "migrations", verbosity=0)
# Check for empty __init__.py file in migrations folder
init_file = os.path.join(migration_dir, "__init__.py")
self.assertTrue(os.path.exists(init_file))
with open(init_file, 'r') as fp:
content = force_text(fp.read())
self.assertEqual(content, '')
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with codecs.open(initial_file, 'r', encoding='utf-8') as fp:
content = fp.read()
self.assertIn('# -*- coding: utf-8 -*-', content)
self.assertIn('migrations.CreateModel', content)
self.assertIn('initial = True', content)
if six.PY3:
self.assertIn('úñí©óðé µóðéø', content) # Meta.verbose_name
self.assertIn('úñí©óðé µóðéøß', content) # Meta.verbose_name_plural
self.assertIn('ÚÑÍ¢ÓÐÉ', content) # title.verbose_name
self.assertIn('“Ðjáñgó”', content) # title.default
else:
self.assertIn('\\xfa\\xf1\\xed\\xa9\\xf3\\xf0\\xe9 \\xb5\\xf3\\xf0\\xe9\\xf8', content) # Meta.verbose_name
self.assertIn('\\xfa\\xf1\\xed\\xa9\\xf3\\xf0\\xe9 \\xb5\\xf3\\xf0\\xe9\\xf8\\xdf', content) # Meta.verbose_name_plural
self.assertIn('\\xda\\xd1\\xcd\\xa2\\xd3\\xd0\\xc9', content) # title.verbose_name
self.assertIn('\\u201c\\xd0j\\xe1\\xf1g\\xf3\\u201d', content) # title.default
def test_makemigrations_order(self):
"""
makemigrations should recognize number-only migrations (0001.py).
"""
module = 'migrations.test_migrations_order'
with self.temporary_migration_module(module=module) as migration_dir:
if hasattr(importlib, 'invalidate_caches'):
# Python 3 importlib caches os.listdir() on some platforms like
# Mac OS X (#23850).
importlib.invalidate_caches()
call_command('makemigrations', 'migrations', '--empty', '-n', 'a', '-v', '0')
self.assertTrue(os.path.exists(os.path.join(migration_dir, '0002_a.py')))
def test_failing_migration(self):
# If a migration fails to serialize, it shouldn't generate an empty file. #21280
apps.register_model('migrations', UnserializableModel)
with self.temporary_migration_module() as migration_dir:
with six.assertRaisesRegex(self, ValueError, r'Cannot serialize'):
call_command("makemigrations", "migrations", verbosity=0)
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertFalse(os.path.exists(initial_file))
def test_makemigrations_conflict_exit(self):
"""
Makes sure that makemigrations exits if it detects a conflict.
"""
with self.temporary_migration_module(module="migrations.test_migrations_conflict"):
with self.assertRaises(CommandError):
call_command("makemigrations")
def test_makemigrations_merge_no_conflict(self):
"""
Makes sure that makemigrations exits if in merge mode with no conflicts.
"""
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
try:
call_command("makemigrations", merge=True, stdout=out)
except CommandError:
self.fail("Makemigrations errored in merge mode with no conflicts")
self.assertIn("No conflicts detected to merge.", out.getvalue())
def test_makemigrations_no_app_sys_exit(self):
"""
Makes sure that makemigrations exits if a non-existent app is specified.
"""
err = six.StringIO()
with self.assertRaises(SystemExit):
call_command("makemigrations", "this_app_does_not_exist", stderr=err)
self.assertIn("'this_app_does_not_exist' could not be found.", err.getvalue())
def test_makemigrations_empty_no_app_specified(self):
"""
Makes sure that makemigrations exits if no app is specified with 'empty' mode.
"""
with self.assertRaises(CommandError):
call_command("makemigrations", empty=True)
def test_makemigrations_empty_migration(self):
"""
Makes sure that makemigrations properly constructs an empty migration.
"""
with self.temporary_migration_module() as migration_dir:
try:
call_command("makemigrations", "migrations", empty=True, verbosity=0)
except CommandError:
self.fail("Makemigrations errored in creating empty migration for a proper app.")
# Check for existing 0001_initial.py file in migration folder
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
with codecs.open(initial_file, 'r', encoding='utf-8') as fp:
content = fp.read()
self.assertIn('# -*- coding: utf-8 -*-', content)
# Remove all whitespace to check for empty dependencies and operations
content = content.replace(' ', '')
self.assertIn('dependencies=[\n]', content)
self.assertIn('operations=[\n]', content)
def test_makemigrations_no_changes_no_apps(self):
"""
Makes sure that makemigrations exits when there are no changes and no apps are specified.
"""
out = six.StringIO()
call_command("makemigrations", stdout=out)
self.assertIn("No changes detected", out.getvalue())
def test_makemigrations_no_changes(self):
"""
Makes sure that makemigrations exits when there are no changes to an app.
"""
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("No changes detected in app 'migrations'", out.getvalue())
def test_makemigrations_no_apps_initial(self):
"""
makemigrations should detect initial is needed on empty migration
modules if no app provided.
"""
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_empty"):
call_command("makemigrations", stdout=out)
self.assertIn("0001_initial.py", out.getvalue())
def test_makemigrations_migrations_announce(self):
"""
Makes sure that makemigrations announces the migration at the default verbosity level.
"""
out = six.StringIO()
with self.temporary_migration_module():
call_command("makemigrations", "migrations", stdout=out)
self.assertIn("Migrations for 'migrations'", out.getvalue())
def test_makemigrations_no_common_ancestor(self):
"""
Makes sure that makemigrations fails to merge migrations with no common ancestor.
"""
with self.assertRaises(ValueError) as context:
with self.temporary_migration_module(module="migrations.test_migrations_no_ancestor"):
call_command("makemigrations", "migrations", merge=True)
exception_message = str(context.exception)
self.assertIn("Could not find common ancestor of", exception_message)
self.assertIn("0002_second", exception_message)
self.assertIn("0002_conflicting_second", exception_message)
def test_makemigrations_interactive_reject(self):
"""
Makes sure that makemigrations enters and exits interactive mode properly.
"""
# Monkeypatch interactive questioner to auto reject
with mock.patch('django.db.migrations.questioner.input', mock.Mock(return_value='N')):
try:
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", merge=True, interactive=True, verbosity=0)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
def test_makemigrations_interactive_accept(self):
"""
Makes sure that makemigrations enters interactive mode and merges properly.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('django.db.migrations.questioner.input', mock.Mock(return_value='y')):
out = six.StringIO()
try:
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
self.assertIn("Created new merge migration", force_text(out.getvalue()))
def test_makemigrations_non_interactive_not_null_addition(self):
"""
Tests that non-interactive makemigrations fails when a default is missing on a new not-null field.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_int = models.IntegerField()
class Meta:
app_label = "migrations"
out = six.StringIO()
with self.assertRaises(SystemExit):
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
def test_makemigrations_non_interactive_not_null_alteration(self):
"""
Tests that non-interactive makemigrations fails when a default is missing on a field changed to not-null.
"""
class Author(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField()
age = models.IntegerField(default=0)
class Meta:
app_label = "migrations"
out = six.StringIO()
try:
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
except CommandError:
self.fail("Makemigrations failed while running non-interactive questioner.")
self.assertIn("Alter field slug on author", force_text(out.getvalue()))
def test_makemigrations_non_interactive_no_model_rename(self):
"""
Makes sure that makemigrations adds and removes a possible model rename in non-interactive mode.
"""
class RenamedModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = six.StringIO()
try:
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
except CommandError:
self.fail("Makemigrations failed while running non-interactive questioner")
self.assertIn("Delete model SillyModel", force_text(out.getvalue()))
self.assertIn("Create model RenamedModel", force_text(out.getvalue()))
def test_makemigrations_non_interactive_no_field_rename(self):
"""
Makes sure that makemigrations adds and removes a possible field rename in non-interactive mode.
"""
class SillyModel(models.Model):
silly_rename = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = six.StringIO()
try:
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", interactive=False, stdout=out)
except CommandError:
self.fail("Makemigrations failed while running non-interactive questioner")
self.assertIn("Remove field silly_field from sillymodel", force_text(out.getvalue()))
self.assertIn("Add field silly_rename to sillymodel", force_text(out.getvalue()))
def test_makemigrations_handle_merge(self):
"""
Makes sure that makemigrations properly merges the conflicting migrations with --noinput.
"""
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", merge=True, interactive=False, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertTrue(os.path.exists(merge_file))
output = force_text(out.getvalue())
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertIn("Created new merge migration", output)
def test_makemigration_merge_dry_run(self):
"""
Makes sure that makemigrations respects --dry-run option when fixing
migration conflicts (#24427).
"""
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", dry_run=True, merge=True, interactive=False, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
output = force_text(out.getvalue())
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
def test_makemigration_merge_dry_run_verbosity_3(self):
"""
Makes sure that `makemigrations --merge --dry-run` writes the merge
migration file to stdout with `verbosity == 3` (#24427).
"""
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", dry_run=True, merge=True, interactive=False,
stdout=out, verbosity=3)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
output = force_text(out.getvalue())
self.assertIn("Merging migrations", output)
self.assertIn("Branch 0002_second", output)
self.assertIn("Branch 0002_conflicting_second", output)
self.assertNotIn("Created new merge migration", output)
# Additional output caused by verbosity 3
# The complete merge migration file that would be written
self.assertIn("# -*- coding: utf-8 -*-", output)
self.assertIn("class Migration(migrations.Migration):", output)
self.assertIn("dependencies = [", output)
self.assertIn("('migrations', '0002_second')", output)
self.assertIn("('migrations', '0002_conflicting_second')", output)
self.assertIn("operations = [", output)
self.assertIn("]", output)
def test_makemigrations_dry_run(self):
"""
Ticket #22676 -- `makemigrations --dry-run` should not ask for defaults.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_date = models.DateField() # Added field without a default
class Meta:
app_label = "migrations"
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", dry_run=True, stdout=out)
# Output the expected changes directly, without asking for defaults
self.assertIn("Add field silly_date to sillymodel", out.getvalue())
def test_makemigrations_dry_run_verbosity_3(self):
"""
Ticket #22675 -- Allow `makemigrations --dry-run` to output the
migrations file to stdout (with verbosity == 3).
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
silly_char = models.CharField(default="")
class Meta:
app_label = "migrations"
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_no_default"):
call_command("makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3)
# Normal --dry-run output
self.assertIn("- Add field silly_char to sillymodel", out.getvalue())
# Additional output caused by verbosity 3
# The complete migrations file that would be written
self.assertIn("# -*- coding: utf-8 -*-", out.getvalue())
self.assertIn("class Migration(migrations.Migration):", out.getvalue())
self.assertIn("dependencies = [", out.getvalue())
self.assertIn("('migrations', '0001_initial'),", out.getvalue())
self.assertIn("migrations.AddField(", out.getvalue())
self.assertIn("model_name='sillymodel',", out.getvalue())
self.assertIn("name='silly_char',", out.getvalue())
def test_makemigrations_migrations_modules_path_not_exist(self):
"""
Ticket #22682 -- Makemigrations fails when specifying custom location
for migration files (using MIGRATION_MODULES) if the custom path
doesn't already exist.
"""
class SillyModel(models.Model):
silly_field = models.BooleanField(default=False)
class Meta:
app_label = "migrations"
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations_path_doesnt_exist.foo.bar") as migration_dir:
call_command("makemigrations", "migrations", stdout=out)
# Migrations file is actually created in the expected path.
initial_file = os.path.join(migration_dir, "0001_initial.py")
self.assertTrue(os.path.exists(initial_file))
# Command output indicates the migration is created.
self.assertIn(" - Create model SillyModel", out.getvalue())
def test_makemigrations_interactive_by_default(self):
"""
Makes sure that the user is prompted to merge by default if there are
conflicts and merge is True. Answer negative to differentiate it from
behavior when --noinput is specified.
"""
# Monkeypatch interactive questioner to auto reject
out = six.StringIO()
with mock.patch('django.db.migrations.questioner.input', mock.Mock(return_value='N')):
try:
with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
call_command("makemigrations", "migrations", merge=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
# This will fail if interactive is False by default
self.assertFalse(os.path.exists(merge_file))
except CommandError:
self.fail("Makemigrations failed while running interactive questioner")
self.assertNotIn("Created new merge migration", out.getvalue())
@override_settings(
INSTALLED_APPS=[
"migrations",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_no_merge(self):
"""
Makes sure that makemigrations does not raise a CommandError when an
unspecified app has conflicting migrations.
"""
try:
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
call_command("makemigrations", "migrations", merge=False, verbosity=0)
except CommandError:
self.fail("Makemigrations fails resolving conflicts in an unspecified app")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.unspecified_app_with_conflict"])
def test_makemigrations_unspecified_app_with_conflict_merge(self):
"""
Makes sure that makemigrations does not create a merge for an
unspecified app even if it has conflicting migrations.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('django.db.migrations.questioner.input', mock.Mock(return_value='y')):
out = six.StringIO()
try:
with self.temporary_migration_module(app_label="migrated_app") as migration_dir:
call_command("makemigrations", "migrated_app", merge=True, interactive=True, stdout=out)
merge_file = os.path.join(migration_dir, '0003_merge.py')
self.assertFalse(os.path.exists(merge_file))
self.assertIn("No conflicts detected to merge.", out.getvalue())
except CommandError:
self.fail("Makemigrations fails resolving conflicts in an unspecified app")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.migrated_app",
"migrations.migrations_test_apps.conflicting_app_with_dependencies"])
def test_makemigrations_merge_dont_output_dependency_operations(self):
"""
Makes sure that makemigrations --merge does not output any operations
from apps that don't belong to a given app.
"""
# Monkeypatch interactive questioner to auto accept
with mock.patch('django.db.migrations.questioner.input', mock.Mock(return_value='N')):
out = six.StringIO()
with mock.patch('django.core.management.color.supports_color', lambda *args: False):
call_command(
"makemigrations", "conflicting_app_with_dependencies",
merge=True, interactive=True, stdout=out
)
val = out.getvalue().lower()
self.assertIn('merging conflicting_app_with_dependencies\n', val)
self.assertIn(
' branch 0002_conflicting_second\n'
' - create model something\n',
val
)
self.assertIn(
' branch 0002_second\n'
' - delete model tribble\n'
' - remove field silly_field from author\n'
' - add field rating to author\n'
' - create model book\n',
val
)
def test_makemigrations_with_custom_name(self):
"""
Makes sure that makemigrations generate a custom migration.
"""
with self.temporary_migration_module() as migration_dir:
def cmd(migration_count, migration_name, *args):
try:
call_command("makemigrations", "migrations", "--verbosity", "0", "--name", migration_name, *args)
except CommandError:
self.fail("Makemigrations errored in creating empty migration with custom name for a proper app.")
migration_file = os.path.join(migration_dir, "%s_%s.py" % (migration_count, migration_name))
# Check for existing migration file in migration folder
self.assertTrue(os.path.exists(migration_file))
with codecs.open(migration_file, "r", encoding="utf-8") as fp:
content = fp.read()
self.assertIn("# -*- coding: utf-8 -*-", content)
content = content.replace(" ", "")
return content
# generate an initial migration
migration_name_0001 = "my_initial_migration"
content = cmd("0001", migration_name_0001)
self.assertIn("dependencies=[\n]", content)
# Python 3 importlib caches os.listdir() on some platforms like
# Mac OS X (#23850).
if hasattr(importlib, 'invalidate_caches'):
importlib.invalidate_caches()
# generate an empty migration
migration_name_0002 = "my_custom_migration"
content = cmd("0002", migration_name_0002, "--empty")
self.assertIn("dependencies=[\n('migrations','0001_%s'),\n]" % migration_name_0001, content)
self.assertIn("operations=[\n]", content)
def test_makemigrations_exit(self):
"""
makemigrations --exit should exit with sys.exit(1) when there are no
changes to an app.
"""
with self.temporary_migration_module():
call_command("makemigrations", "--exit", "migrations", verbosity=0)
with self.temporary_migration_module(module="migrations.test_migrations_no_changes"):
with self.assertRaises(SystemExit):
call_command("makemigrations", "--exit", "migrations", verbosity=0)
class SquashMigrationsTests(MigrationTestBase):
"""
Tests running the squashmigrations command.
"""
def test_squashmigrations_squashes(self):
"""
Tests that squashmigrations squashes migrations.
"""
with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir:
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0)
squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py")
self.assertTrue(os.path.exists(squashed_migration_file))
def test_squashmigrations_initial_attribute(self):
with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir:
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0)
squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py")
with codecs.open(squashed_migration_file, "r", encoding="utf-8") as fp:
content = fp.read()
self.assertIn("initial = True", content)
def test_squashmigrations_optimizes(self):
"""
Tests that squashmigrations optimizes operations.
"""
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, stdout=out)
self.assertIn("Optimized from 7 operations to 3 operations.", force_text(out.getvalue()))
def test_ticket_23799_squashmigrations_no_optimize(self):
"""
Makes sure that squashmigrations --no-optimize really doesn't optimize operations.
"""
out = six.StringIO()
with self.temporary_migration_module(module="migrations.test_migrations"):
call_command("squashmigrations", "migrations", "0002",
interactive=False, verbosity=1, no_optimize=True, stdout=out)
self.assertIn("Skipping optimization", force_text(out.getvalue()))
| bsd-3-clause |
patrickod/stem | test/unit/descriptor/extrainfo_descriptor.py | 2 | 29131 | """
Unit tests for stem.descriptor.extrainfo_descriptor.
"""
import datetime
import functools
import re
import unittest
import stem.descriptor
import test.require
from stem.descriptor.extrainfo_descriptor import (
RelayExtraInfoDescriptor,
BridgeExtraInfoDescriptor,
DirResponse,
DirStat,
)
from test.unit.descriptor import (
get_resource,
base_expect_invalid_attr,
base_expect_invalid_attr_for_text,
)
expect_invalid_attr = functools.partial(base_expect_invalid_attr, RelayExtraInfoDescriptor, 'nickname', 'Unnamed')
expect_invalid_attr_for_text = functools.partial(base_expect_invalid_attr_for_text, RelayExtraInfoDescriptor, 'nickname', 'Unnamed')
class TestExtraInfoDescriptor(unittest.TestCase):
def test_metrics_relay_descriptor(self):
"""
Parses and checks our results against an extrainfo descriptor from metrics.
"""
descriptor_file = open(get_resource('extrainfo_relay_descriptor'), 'rb')
expected_signature = """-----BEGIN SIGNATURE-----
K5FSywk7qvw/boA4DQcqkls6Ize5vcBYfhQ8JnOeRQC9+uDxbnpm3qaYN9jZ8myj
k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
7LZqklu+gVvhMKREpchVqlAwXkWR44VENm24Hs+mT3M=
-----END SIGNATURE-----"""
desc = next(stem.descriptor.parse_file(descriptor_file, 'extra-info 1.0'))
self.assertEqual('NINJA', desc.nickname)
self.assertEqual('B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48', desc.fingerprint)
self.assertEqual(datetime.datetime(2012, 5, 5, 17, 3, 50), desc.published)
self.assertEqual(datetime.datetime(2012, 5, 5, 17, 2, 45), desc.read_history_end)
self.assertEqual(900, desc.read_history_interval)
self.assertEqual(datetime.datetime(2012, 5, 5, 17, 2, 45), desc.write_history_end)
self.assertEqual(900, desc.write_history_interval)
self.assertEqual(datetime.datetime(2012, 5, 5, 17, 2, 45), desc.dir_read_history_end)
self.assertEqual(900, desc.dir_read_history_interval)
self.assertEqual(datetime.datetime(2012, 5, 5, 17, 2, 45), desc.dir_write_history_end)
self.assertEqual(900, desc.dir_write_history_interval)
self.assertEqual(expected_signature, desc.signature)
self.assertEqual('00A57A9AAB5EA113898E2DD02A755E31AFC27227', desc.digest())
self.assertEqual([], desc.get_unrecognized_lines())
# The read-history, write-history, dirreq-read-history, and
# dirreq-write-history lines are pretty long so just checking
# the initial contents for the line and parsed values.
read_values_start = [3309568, 9216, 41984, 27648, 123904]
self.assertEqual(read_values_start, desc.read_history_values[:5])
write_values_start = [1082368, 19456, 50176, 272384, 485376]
self.assertEqual(write_values_start, desc.write_history_values[:5])
dir_read_values_start = [0, 0, 0, 0, 33792, 27648, 48128]
self.assertEqual(dir_read_values_start, desc.dir_read_history_values[:7])
dir_write_values_start = [0, 0, 0, 227328, 349184, 382976, 738304]
self.assertEqual(dir_write_values_start, desc.dir_write_history_values[:7])
def test_metrics_bridge_descriptor(self):
"""
Parses and checks our results against an extrainfo bridge descriptor from
metrics.
"""
descriptor_file = open(get_resource('extrainfo_bridge_descriptor'), 'rb')
expected_dir_v2_responses = {
DirResponse.OK: 0,
DirResponse.UNAVAILABLE: 0,
DirResponse.NOT_FOUND: 0,
DirResponse.NOT_MODIFIED: 0,
DirResponse.BUSY: 0,
}
expected_dir_v3_responses = {
DirResponse.OK: 72,
DirResponse.NOT_ENOUGH_SIGS: 0,
DirResponse.UNAVAILABLE: 0,
DirResponse.NOT_FOUND: 0,
DirResponse.NOT_MODIFIED: 0,
DirResponse.BUSY: 0,
}
desc = next(stem.descriptor.parse_file(descriptor_file, 'bridge-extra-info 1.0'))
self.assertEqual('ec2bridgereaac65a3', desc.nickname)
self.assertEqual('1EC248422B57D9C0BD751892FE787585407479A4', desc.fingerprint)
self.assertEqual(datetime.datetime(2012, 6, 8, 2, 21, 27), desc.published)
self.assertEqual(datetime.datetime(2012, 6, 8, 2, 10, 38), desc.read_history_end)
self.assertEqual(900, desc.read_history_interval)
self.assertEqual(datetime.datetime(2012, 6, 8, 2, 10, 38), desc.write_history_end)
self.assertEqual(900, desc.write_history_interval)
self.assertEqual(datetime.datetime(2012, 6, 8, 2, 10, 38), desc.dir_read_history_end)
self.assertEqual(900, desc.dir_read_history_interval)
self.assertEqual(datetime.datetime(2012, 6, 8, 2, 10, 38), desc.dir_write_history_end)
self.assertEqual(900, desc.dir_write_history_interval)
self.assertEqual('00A2AECCEAD3FEE033CFE29893387143146728EC', desc.digest())
self.assertEqual([], desc.get_unrecognized_lines())
read_values_start = [337920, 437248, 3995648, 48726016]
self.assertEqual(read_values_start, desc.read_history_values[:4])
write_values_start = [343040, 991232, 5649408, 49548288]
self.assertEqual(write_values_start, desc.write_history_values[:4])
dir_read_values_start = [0, 71680, 99328, 25600]
self.assertEqual(dir_read_values_start, desc.dir_read_history_values[:4])
dir_write_values_start = [5120, 664576, 2419712, 578560]
self.assertEqual(dir_write_values_start, desc.dir_write_history_values[:4])
self.assertEqual({}, desc.dir_v2_requests)
self.assertEqual({}, desc.dir_v3_requests)
self.assertEqual(expected_dir_v2_responses, desc.dir_v2_responses)
self.assertEqual(expected_dir_v3_responses, desc.dir_v3_responses)
self.assertEqual({}, desc.dir_v2_responses_unknown)
self.assertEqual({}, desc.dir_v2_responses_unknown)
@test.require.cryptography
def test_descriptor_signing(self):
RelayExtraInfoDescriptor.create(sign = True)
self.assertRaisesRegexp(NotImplementedError, 'Signing of BridgeExtraInfoDescriptor not implemented', BridgeExtraInfoDescriptor.create, sign = True)
def test_multiple_metrics_bridge_descriptors(self):
"""
Check that we can read bridge descriptors when there's multiple in a file.
"""
descriptor_file = open(get_resource('extrainfo_bridge_descriptor_multiple'), 'rb')
desc_list = list(stem.descriptor.parse_file(descriptor_file))
self.assertEqual(6, len(desc_list))
self.assertEqual('909B07DB17E21D263C55794AB815BF1DB195FDD9', desc_list[0].fingerprint)
self.assertEqual('7F7798A3CBB0F643B1CFCE3FD4F2B7C553764498', desc_list[1].fingerprint)
self.assertEqual('B4869206C1EEA4A090FE614155BD6942701F80F1', desc_list[2].fingerprint)
self.assertEqual('C18896EB6274DC8123491FAE1DD17E1769C54C4F', desc_list[3].fingerprint)
self.assertEqual('478B4CB438302981DE9AAF246F48DBE57F69050A', desc_list[4].fingerprint)
self.assertEqual('25D9D52A0350B42E69C8AB7CE945DB1CA38DA0CF', desc_list[5].fingerprint)
def test_with_ed25519(self):
"""
Parses a descriptor with a ed25519 identity key.
"""
with open(get_resource('extrainfo_descriptor_with_ed25519'), 'rb') as descriptor_file:
desc = next(stem.descriptor.parse_file(descriptor_file, validate = True))
self.assertEqual('silverfoxden', desc.nickname)
self.assertEqual('4970B1DC3DBC8D82D7F1E43FF44B28DBF4765A4E', desc.fingerprint)
self.assertTrue('AQQABhz0AQFcf5tGWLvPvr' in desc.ed25519_certificate)
self.assertEqual('g6Zg7Er8K7C1etmt7p20INE1ExIvMRPvhwt6sjbLqEK+EtQq8hT+86hQ1xu7cnz6bHee+Zhhmcc4JamV4eiMAw', desc.ed25519_signature)
self.assertEqual([], desc.get_unrecognized_lines())
def test_bridge_with_ed25519(self):
    """
    Parses a bridge descriptor with a ed25519 identity key.
    """

    with open(get_resource('bridge_extrainfo_descriptor_with_ed25519'), 'rb') as descriptor_file:
        desc = next(stem.descriptor.parse_file(descriptor_file, validate = True))

    self.assertEqual('Unnamed', desc.nickname)
    self.assertEqual('B8AB331047F1C1637EFE07FB1B94CCC0FE0ABFFA', desc.fingerprint)

    # bridge descriptors are scrubbed: the full certificate is dropped and
    # only its hash is retained
    self.assertFalse(hasattr(desc, 'ed25519_certificate'))
    self.assertEqual('VigmhxML9uw8CT1XeGqZ8KLMhKk6AOKnChQt24usBbI', desc.ed25519_certificate_hash)
    self.assertEqual('7DSOQz9eGgjDX6GT7qcrVViK8yqJD4aoEnuhdAgYtgA', desc.router_digest_sha256)
    self.assertEqual([], desc.get_unrecognized_lines())
def test_nonascii_v3_reqs(self):
    """
    Malformed descriptor with non-ascii content for the 'dirreq-v3-reqs' line.
    """

    with open(get_resource('unparseable/extrainfo_nonascii_v3_reqs'), 'rb') as descriptor_file:
        desc_generator = stem.descriptor.parse_file(descriptor_file, 'extra-info 1.0', validate = True)

        # parse_file() is lazy, so the ValueError only surfaces when we pull
        # the first descriptor from the generator
        exc_msg = "'dirreq-v3-reqs' line had non-ascii content: S?=4026597208,S?=4026597208,S?=4026597208,S?=4026597208,S?=4026597208,S?=4026597208,??=4026591624,6?=4026537520,6?=4026537520,6?=4026537520,us=8"
        self.assertRaisesRegexp(ValueError, re.escape(exc_msg), next, desc_generator)
def test_minimal_extrainfo_descriptor(self):
    """
    Basic sanity check that we can parse an extrainfo descriptor with minimal
    attributes.
    """

    # a freshly created descriptor gets an auto-generated 'Unnamed*' nickname
    minimal_desc = RelayExtraInfoDescriptor.create()
    nickname = minimal_desc.nickname
    self.assertTrue(nickname.startswith('Unnamed'))
def test_unrecognized_line(self):
    """
    Includes unrecognized content in the descriptor.
    """

    # a made-up 'pepperjack' keyword should be surfaced verbatim
    expected_line = 'pepperjack is oh so tasty!'
    desc = RelayExtraInfoDescriptor.create({'pepperjack': 'is oh so tasty!'})
    self.assertEqual([expected_line], desc.get_unrecognized_lines())
def test_proceeding_line(self):
    """
    Includes a line prior to the 'extra-info' entry.
    """

    # content must begin with the 'extra-info' keyword line
    leading_line = b'exit-streams-opened port=80\n'
    expect_invalid_attr_for_text(self, leading_line + RelayExtraInfoDescriptor.content())
def test_trailing_line(self):
    """
    Includes a line after the 'router-signature' entry.
    """

    # nothing may follow the 'router-signature' line
    trailing_line = b'\nexit-streams-opened port=80'
    expect_invalid_attr_for_text(self, RelayExtraInfoDescriptor.content() + trailing_line)
def test_extrainfo_line_missing_fields(self):
    """
    Checks that validation catches when the extra-info line is missing fields
    and that without validation both the nickname and fingerprint are left as
    None.
    """

    # a nickname or fingerprint alone (leading/trailing space included)
    # isn't a valid 'extra-info' line
    malformed_lines = [
        'ninja',
        'ninja ',
        'B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48',
        ' B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48',
    ]

    for malformed_line in malformed_lines:
        desc = expect_invalid_attr(self, {'extra-info': malformed_line}, 'nickname')
        self.assertIsNone(desc.nickname)
        self.assertIsNone(desc.fingerprint)
def test_geoip_db_digest(self):
    """
    Parses the geoip-db-digest and geoip6-db-digest lines with valid and
    invalid data.
    """

    # a well formed forty character hex digest is accepted by both keywords
    geoip_db_digest = '916A3CA8B7DF61473D5AE5B21711F35F301CE9E8'

    desc = RelayExtraInfoDescriptor.create({'geoip-db-digest': geoip_db_digest})
    self.assertEqual(geoip_db_digest, desc.geoip_db_digest)

    desc = RelayExtraInfoDescriptor.create({'geoip6-db-digest': geoip_db_digest})
    self.assertEqual(geoip_db_digest, desc.geoip6_db_digest)

    # empty, too short, too long, and non-hex ('G', '-') digests are rejected
    test_entries = (
        '',
        '916A3CA8B7DF61473D5AE5B21711F35F301CE9E',
        '916A3CA8B7DF61473D5AE5B21711F35F301CE9E88',
        '916A3CA8B7DF61473D5AE5B21711F35F301CE9EG',
        '916A3CA8B7DF61473D5AE5B21711F35F301CE9E-',
    )

    for entry in test_entries:
        expect_invalid_attr(self, {'geoip-db-digest': entry}, 'geoip_db_digest')
        expect_invalid_attr(self, {'geoip6-db-digest': entry}, 'geoip6_db_digest')
def test_cell_circuits_per_decile(self):
    """
    Parses the cell-circuits-per-decile line with valid and invalid data.
    """

    # Note: the original assigned test_entries = (('0', 0), ('11', 11)) here
    # and immediately shadowed it without use - that dead assignment is
    # removed.

    # valid non-negative values parse as ints
    for entry in ('0', '11', '25'):
        desc = RelayExtraInfoDescriptor.create({'cell-circuits-per-decile': entry})
        self.assertEqual(int(entry), desc.cell_circuits_per_decile)

    # empty, blank, negative, and non-numeric values are rejected
    test_entries = (
        '',
        ' ',
        '-5',
        'blarg',
    )

    for entry in test_entries:
        expect_invalid_attr(self, {'cell-circuits-per-decile': entry}, 'cell_circuits_per_decile')
def test_dir_response_lines(self):
    """
    Parses the dirreq-v2-resp and dirreq-v3-resp lines with valid and invalid
    data.
    """

    for keyword in ('dirreq-v2-resp', 'dirreq-v3-resp'):
        # e.g. 'dirreq-v2-resp' maps to the 'dir_v2_responses' attribute,
        # with unrecognized statuses collected in 'dir_v2_responses_unknown'
        attr = keyword.replace('-', '_').replace('dirreq', 'dir').replace('resp', 'responses')
        unknown_attr = attr + '_unknown'

        test_value = 'ok=0,unavailable=0,not-found=984,not-modified=0,something-new=7'
        desc = RelayExtraInfoDescriptor.create({keyword: test_value})
        self.assertEqual(0, getattr(desc, attr)[DirResponse.OK])
        self.assertEqual(0, getattr(desc, attr)[DirResponse.UNAVAILABLE])
        self.assertEqual(984, getattr(desc, attr)[DirResponse.NOT_FOUND])
        self.assertEqual(0, getattr(desc, attr)[DirResponse.NOT_MODIFIED])

        # statuses stem doesn't recognize land in the '_unknown' dict
        self.assertEqual(7, getattr(desc, unknown_attr)['something-new'])

        # negative counts, a bad separator, and a bad delimiter are rejected,
        # leaving both attributes as None
        test_entries = (
            'ok=-4',
            'ok:4',
            'ok=4.not-found=3',
        )

        for entry in test_entries:
            desc = expect_invalid_attr(self, {keyword: entry})
            self.assertEqual(None, getattr(desc, attr))
            self.assertEqual(None, getattr(desc, unknown_attr))
def test_dir_stat_lines(self):
    """
    Parses the dirreq-v2-direct-dl, dirreq-v3-direct-dl, dirreq-v2-tunneled-dl,
    and dirreq-v3-tunneled-dl lines with valid and invalid data.
    """

    # The original tuple listed the two v2 keywords twice each, so the v3
    # lines (which the docstring promises to cover) were never exercised.
    for keyword in ('dirreq-v2-direct-dl', 'dirreq-v3-direct-dl', 'dirreq-v2-tunneled-dl', 'dirreq-v3-tunneled-dl'):
        # e.g. 'dirreq-v2-direct-dl' maps to the 'dir_v2_direct_dl' attribute
        attr = keyword.replace('-', '_').replace('dirreq', 'dir')
        unknown_attr = attr + '_unknown'

        test_value = 'complete=2712,timeout=32,running=4,min=741,d1=14507,d2=22702,q1=28881,d3=38277,d4=73729,md=111455,d6=168231,d7=257218,q3=319833,d8=390507,d9=616301,something-new=11,max=29917857'
        desc = RelayExtraInfoDescriptor.create({keyword: test_value})
        self.assertEqual(2712, getattr(desc, attr)[DirStat.COMPLETE])
        self.assertEqual(32, getattr(desc, attr)[DirStat.TIMEOUT])
        self.assertEqual(4, getattr(desc, attr)[DirStat.RUNNING])
        self.assertEqual(741, getattr(desc, attr)[DirStat.MIN])
        self.assertEqual(14507, getattr(desc, attr)[DirStat.D1])
        self.assertEqual(22702, getattr(desc, attr)[DirStat.D2])
        self.assertEqual(28881, getattr(desc, attr)[DirStat.Q1])
        self.assertEqual(38277, getattr(desc, attr)[DirStat.D3])
        self.assertEqual(73729, getattr(desc, attr)[DirStat.D4])
        self.assertEqual(111455, getattr(desc, attr)[DirStat.MD])
        self.assertEqual(168231, getattr(desc, attr)[DirStat.D6])
        self.assertEqual(257218, getattr(desc, attr)[DirStat.D7])
        self.assertEqual(319833, getattr(desc, attr)[DirStat.Q3])
        self.assertEqual(390507, getattr(desc, attr)[DirStat.D8])
        self.assertEqual(616301, getattr(desc, attr)[DirStat.D9])
        self.assertEqual(29917857, getattr(desc, attr)[DirStat.MAX])

        # stats stem doesn't recognize land in the '_unknown' dict
        self.assertEqual(11, getattr(desc, unknown_attr)['something-new'])

        # negative counts, a bad separator, and a bad delimiter are rejected
        test_entries = (
            'complete=-4',
            'complete:4',
            'complete=4.timeout=3',
        )

        for entry in test_entries:
            desc = expect_invalid_attr(self, {keyword: entry})
            self.assertEqual(None, getattr(desc, attr))
            self.assertEqual(None, getattr(desc, unknown_attr))
def test_conn_bi_direct(self):
    """
    Parses the conn-bi-direct line with valid and invalid data.
    """

    # a valid line yields an end timestamp, interval, and the four
    # below/read/write/both connection counts
    desc = RelayExtraInfoDescriptor.create({'conn-bi-direct': '2012-05-03 12:07:50 (500 s) 277431,12089,0,2134'})
    self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), desc.conn_bi_direct_end)
    self.assertEqual(500, desc.conn_bi_direct_interval)
    self.assertEqual(277431, desc.conn_bi_direct_below)
    self.assertEqual(12089, desc.conn_bi_direct_read)
    self.assertEqual(0, desc.conn_bi_direct_write)
    self.assertEqual(2134, desc.conn_bi_direct_both)

    # malformed timestamps, intervals, and count lists (missing, non-numeric,
    # or negative entries) should each be rejected
    test_entries = (
        '',
        '2012-05-03',
        '2012-05-03 12:07:60 (500 s)',
        '2012-05-03 12:07:50 (500 s',
        '2012-05-03 12:07:50 (500s)',
        '2012-05-03 12:07:50 (500 s)11',
        '2012-05-03 12:07:50 (500 s) 277431,12089,0',
        '2012-05-03 12:07:50 (500 s) 277431,12089,0a,2134',
        '2012-05-03 12:07:50 (500 s) -277431,12089,0,2134',
    )

    for entry in test_entries:
        desc = expect_invalid_attr(self, {'conn-bi-direct': entry})
        self.assertEqual(None, desc.conn_bi_direct_end)
        self.assertEqual(None, desc.conn_bi_direct_interval)
        self.assertEqual(None, desc.conn_bi_direct_below)
        self.assertEqual(None, desc.conn_bi_direct_read)
        self.assertEqual(None, desc.conn_bi_direct_write)
        self.assertEqual(None, desc.conn_bi_direct_both)
def test_percentage_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" num%
    """

    for keyword in ('dirreq-v2-share', 'dirreq-v3-share'):
        # e.g. 'dirreq-v2-share' maps to the 'dir_v2_share' attribute
        attr = keyword.replace('-', '_').replace('dirreq', 'dir')

        # percentages are parsed into unit-interval floats
        test_entries = (
            ('0.00%', 0.0),
            ('0.01%', 0.0001),
            ('50%', 0.5),
            ('100.0%', 1.0),
        )

        for test_value, expected_value in test_entries:
            desc = RelayExtraInfoDescriptor.create({keyword: test_value})
            self.assertEqual(expected_value, getattr(desc, attr))

        # Empty, blank, missing '%', and negative values are rejected. The
        # original wrote these as ('',) style parenthesized expressions,
        # which are just plain strings - not one-element tuples.
        test_entries = (
            '',
            ' ',
            '100',
            '-5%',
        )

        for entry in test_entries:
            expect_invalid_attr(self, {keyword: entry}, attr)
def test_number_list_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" num,...,num
    """

    for keyword in ('cell-processed-cells', 'cell-queued-cells', 'cell-time-in-queue'):
        # e.g. 'cell-queued-cells' maps to the 'cell_queued_cells' attribute
        attr = keyword.replace('-', '_')

        # values parse into a list of floats; an empty or blank list is fine
        test_entries = (
            ('', []),
            (' ', []),
            ('0,0,0', [0.0, 0.0, 0.0]),
            ('2.3,-4.6,8.9,16.12,32.15', [2.3, -4.6, 8.9, 16.12, 32.15]),
        )

        for test_value, expected_value in test_entries:
            desc = RelayExtraInfoDescriptor.create({keyword: test_value})
            self.assertEqual(expected_value, getattr(desc, attr))

        # malformed lists are invalid, but the parseable entries are still
        # salvaged (hence the expected partial results below)
        test_entries = (
            (',,11', [11.0]),
            ('abc,5.7,def', [5.7]),
            ('blarg', []),
        )

        for entry, expected in test_entries:
            expect_invalid_attr(self, {keyword: entry}, attr, expected)
def test_timestamp_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" YYYY-MM-DD HH:MM:SS
    """

    for keyword in ('published', 'geoip-start-time'):
        attr = keyword.replace('-', '_')

        # a well formed timestamp is parsed into a datetime
        desc = RelayExtraInfoDescriptor.create({keyword: '2012-05-03 12:07:50'})
        self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, attr))

        # empty, out-of-range seconds, and partial dates are rejected
        for invalid_entry in ('', '2012-05-03 12:07:60', '2012-05-03 ', '2012-05-03'):
            expect_invalid_attr(self, {keyword: invalid_entry}, attr)
def test_timestamp_and_interval_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s)
    """

    for keyword in ('cell-stats-end', 'entry-stats-end', 'exit-stats-end', 'bridge-stats-end', 'dirreq-stats-end'):
        # e.g. 'dirreq-stats-end' maps to 'dir_stats_end' / 'dir_stats_interval'
        end_attr = keyword.replace('-', '_').replace('dirreq', 'dir')
        interval_attr = end_attr[:-4] + '_interval'

        desc = RelayExtraInfoDescriptor.create({keyword: '2012-05-03 12:07:50 (500 s)'})
        self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, end_attr))
        self.assertEqual(500, getattr(desc, interval_attr))

        # Malformed timestamps and intervals are rejected. The original only
        # exercised these against 'entry-stats-end' despite sitting in a loop
        # over five keywords - check every keyword instead.
        test_entries = (
            '',
            '2012-05-03 ',
            '2012-05-03',
            '2012-05-03 12:07:60 (500 s)',
            '2012-05-03 12:07:50 (500s)',
            '2012-05-03 12:07:50 (500 s',
            '2012-05-03 12:07:50 (500 )',
        )

        for entry in test_entries:
            desc = expect_invalid_attr(self, {keyword: entry})
            self.assertEqual(None, getattr(desc, end_attr))
            self.assertEqual(None, getattr(desc, interval_attr))
def test_timestamp_interval_and_value_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM...
    """

    for keyword in ('read-history', 'write-history', 'dirreq-read-history', 'dirreq-write-history'):
        # e.g. 'dirreq-read-history' maps to the dir_read_history_end /
        # _interval / _values attributes
        base_attr = keyword.replace('-', '_').replace('dirreq', 'dir')
        end_attr = base_attr + '_end'
        interval_attr = base_attr + '_interval'
        values_attr = base_attr + '_values'

        desc = RelayExtraInfoDescriptor.create({keyword: '2012-05-03 12:07:50 (500 s) 50,11,5'})
        self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, end_attr))
        self.assertEqual(500, getattr(desc, interval_attr))
        self.assertEqual([50, 11, 5], getattr(desc, values_attr))

    # a missing value list (with or without trailing whitespace) is fine
    for test_value in ('', ' '):
        desc = RelayExtraInfoDescriptor.create({'write-history': '2012-05-03 12:07:50 (500 s)%s' % test_value})
        self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), desc.write_history_end)
        self.assertEqual(500, desc.write_history_interval)
        self.assertEqual([], desc.write_history_values)

    # NOTE(review): malformed input is only exercised via 'write-history'
    # here, not for each keyword above - consider looping over the keywords
    # as test_timestamp_and_interval_lines should.
    test_entries = (
        '',
        '2012-05-03',
        '2012-05-03 12:07:60 (500 s)',
        '2012-05-03 12:07:50 (500s)',
        '2012-05-03 12:07:50 (500 s',
        '2012-05-03 12:07:50 (500 s)11',
    )

    for entry in test_entries:
        desc = expect_invalid_attr(self, {'write-history': entry})
        self.assertEqual(None, desc.write_history_end)
        self.assertEqual(None, desc.write_history_interval)
        self.assertEqual(None, desc.write_history_values)
def test_port_mapping_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" port=N,port=N,...
    """

    for keyword in ('exit-kibibytes-written', 'exit-kibibytes-read', 'exit-streams-opened'):
        attr = keyword.replace('-', '_')

        # ports become int keys; the special 'other' bucket stays a string key
        test_entries = (
            ('', {}),
            ('443=100,other=111', {443: 100, 'other': 111}),
            ('80=115533759,443=1777,995=690', {80: 115533759, 443: 1777, 995: 690}),
        )

        for test_value, expected_value in test_entries:
            desc = RelayExtraInfoDescriptor.create({keyword: test_value})
            self.assertEqual(expected_value, getattr(desc, attr))

        # out-of-range or negative ports, negative or missing counts, and a
        # bad delimiter are rejected
        test_entries = (
            '8000000=115533759',
            '-80=115533759',
            '80=-115533759',
            '=115533759',
            '80=',
            '80,115533759',
        )

        for entry in test_entries:
            expect_invalid_attr(self, {keyword: entry}, attr)
def test_hidden_service_stats_end(self):
    """
    Exercise the hidserv-stats-end, which should be a simple date.
    """

    desc = RelayExtraInfoDescriptor.create({'hidserv-stats-end': '2012-05-03 12:07:50'})
    self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), desc.hs_stats_end)

    # each progressively-truncated timestamp (and a negative seconds
    # component) should be rejected
    test_entries = (
        '',
        '2012',
        '2012-05',
        '2012-05-03',
        '2012-05-03 12',
        '2012-05-03 12:07',
        '2012-05-03 12:07:-50',
    )

    for entry in test_entries:
        expect_invalid_attr(self, {'hidserv-stats-end': entry}, 'hs_stats_end')
def test_hidden_service_stats(self):
    """
    Check the 'hidserv-rend-relayed-cells' and 'hidserv-dir-onions-seen', which
    share the same format.
    """

    # (descriptor keyword, numeric stat attribute, key=value extras attribute)
    attributes = (
        ('hidserv-rend-relayed-cells', 'hs_rend_cells', 'hs_rend_cells_attr'),
        ('hidserv-dir-onions-seen', 'hs_dir_onions_seen', 'hs_dir_onions_seen_attr'),
    )

    # missing stat, non-numeric stat, and malformed key=value extras
    test_entries = (
        '',
        'hello',
        ' key=value',
        '40 key',
        '40 key value',
        '40 key key=value',
    )

    for keyword, stat_attr, extra_attr in attributes:
        # just the numeric stat (no extra attributes)
        desc = RelayExtraInfoDescriptor.create({keyword: '345'})
        self.assertEqual(345, getattr(desc, stat_attr))
        self.assertEqual({}, getattr(desc, extra_attr))

        # values can be negative (#15276)
        desc = RelayExtraInfoDescriptor.create({keyword: '-345'})
        self.assertEqual(-345, getattr(desc, stat_attr))
        self.assertEqual({}, getattr(desc, extra_attr))

        # with extra attributes
        desc = RelayExtraInfoDescriptor.create({keyword: '345 spiffy=true snowmen=neat'})
        self.assertEqual(345, getattr(desc, stat_attr))
        self.assertEqual({'spiffy': 'true', 'snowmen': 'neat'}, getattr(desc, extra_attr))

        for entry in test_entries:
            expect_invalid_attr(self, {keyword: entry}, stat_attr)
            expect_invalid_attr(self, {keyword: entry}, extra_attr, {})
def test_padding_counts(self):
    """
    Check the 'padding-counts' lines.
    """

    desc = RelayExtraInfoDescriptor.create({'padding-counts': '2017-05-17 11:02:58 (86400 s) bin-size=10000 write-drop=0 write-pad=10000 write-total=10000 read-drop=0 read-pad=10000 read-total=3780000 enabled-read-pad=0 enabled-read-total=0 enabled-write-pad=0 enabled-write-total=0 max-chanpad-timers=0 non-numeric=test'})

    self.assertEqual({
        'bin-size': 10000,
        'write-drop': 0,
        'write-pad': 10000,
        'write-total': 10000,
        'read-drop': 0,
        'read-pad': 10000,
        'read-total': 3780000,
        'enabled-read-pad': 0,
        'enabled-read-total': 0,
        'enabled-write-pad': 0,
        'enabled-write-total': 0,
        'max-chanpad-timers': 0,
        'non-numeric': 'test',  # presently all values are ints but the spec allows for anything
    }, desc.padding_counts)
    self.assertEqual(datetime.datetime(2017, 5, 17, 11, 2, 58), desc.padding_counts_end)
    self.assertEqual(86400, desc.padding_counts_interval)

    # malformed timestamps, intervals, and key=value entries are rejected,
    # leaving an empty dict and None end/interval
    test_entries = (
        '',
        '2012-05-03',
        '2012-05-03 12:07:60 (500 s)',
        '2012-05-03 12:07:50 (500 s',
        '2012-05-03 12:07:50 (500s)',
        '2012-05-03 12:07:50 (500 s)bin-size=10',
        '2012-05-03 12:07:50 (500 s) bin-size',
        '2012-05-03 12:07:50 (500 s) bin-size=',
    )

    for entry in test_entries:
        desc = expect_invalid_attr(self, {'padding-counts': entry})
        self.assertEqual({}, desc.padding_counts)
        self.assertEqual(None, desc.padding_counts_end)
        self.assertEqual(None, desc.padding_counts_interval)
def test_locale_mapping_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" CC=N,CC=N,...
    """

    for keyword in ('dirreq-v2-ips', 'dirreq-v3-ips', 'dirreq-v2-reqs', 'dirreq-v3-reqs', 'geoip-client-origins', 'entry-ips', 'bridge-ips'):
        # e.g. 'dirreq-v2-reqs' maps to the 'dir_v2_requests' attribute
        attr = keyword.replace('-', '_').replace('dirreq', 'dir').replace('reqs', 'requests')

        # two-letter locale codes map to integer counts
        test_entries = (
            ('', {}),
            ('uk=5,de=3,jp=2', {'uk': 5, 'de': 3, 'jp': 2}),
        )

        for test_value, expected_value in test_entries:
            desc = RelayExtraInfoDescriptor.create({keyword: test_value})
            self.assertEqual(expected_value, getattr(desc, attr))

        # negative counts, a three-letter code, a bad separator, and a bad
        # delimiter are rejected
        test_entries = (
            'uk=-4',
            'uki=4',
            'uk:4',
            'uk=4.de=3',
        )

        for entry in test_entries:
            expect_invalid_attr(self, {keyword: entry}, attr)
def test_minimal_bridge_descriptor(self):
    """
    Basic sanity check that we can parse a descriptor with minimal attributes.
    """

    desc = BridgeExtraInfoDescriptor.create()

    self.assertEqual([], desc.get_unrecognized_lines())
    self.assertEqual('ec2bridgereaac65a3', desc.nickname)

    # bridge descriptors are scrubbed, so crypto fields such as the
    # signature shouldn't be present
    self.assertRaises(AttributeError, getattr, desc, 'signature')
def test_bridge_ip_versions_line(self):
    """
    Parses the 'bridge-ip-versions' line, which only appears in bridges.
    """

    desc = BridgeExtraInfoDescriptor.create({'bridge-ip-versions': 'v4=16,v6=40'})
    self.assertEqual({'v4': 16, 'v6': 40}, desc.ip_versions)

    # an empty mapping is fine
    desc = BridgeExtraInfoDescriptor.create({'bridge-ip-versions': ''})
    self.assertEqual({}, desc.ip_versions)

    # counts must be integers
    # NOTE(review): bridge content is fed to RelayExtraInfoDescriptor here -
    # presumably fine since both share the parsing logic, but confirm
    # BridgeExtraInfoDescriptor wasn't intended.
    desc_text = BridgeExtraInfoDescriptor.content({'bridge-ip-versions': 'v4=24.5'})
    self.assertRaises(ValueError, RelayExtraInfoDescriptor, desc_text, True)
def test_bridge_ip_transports_line(self):
    """
    Parses the 'bridge-ip-transports' line, which only appears in bridges.
    """

    desc = BridgeExtraInfoDescriptor.create({'bridge-ip-transports': '<OR>=16,<??>=40'})
    self.assertEqual({'<OR>': 16, '<??>': 40}, desc.ip_transports)

    # an empty mapping is fine
    desc = BridgeExtraInfoDescriptor.create({'bridge-ip-transports': ''})
    self.assertEqual({}, desc.ip_transports)

    # counts must be integers
    # NOTE(review): bridge content is fed to RelayExtraInfoDescriptor here -
    # same pattern as test_bridge_ip_versions_line; confirm it's intentional.
    desc_text = BridgeExtraInfoDescriptor.content({'bridge-ip-transports': '<OR>=24.5'})
    self.assertRaises(ValueError, RelayExtraInfoDescriptor, desc_text, True)
def test_transport_line(self):
    """
    Basic exercise for both a bridge and relay's transport entry.
    """

    # bridges scrub the address and port, leaving just the transport name
    desc = BridgeExtraInfoDescriptor.create({'transport': 'obfs3'})
    self.assertEqual({'obfs3': (None, None, None)}, desc.transport)
    self.assertEqual([], desc.get_unrecognized_lines())

    # relays keep the address and port (args list empty here)
    desc = RelayExtraInfoDescriptor.create({'transport': 'obfs2 83.212.96.201:33570'})
    self.assertEqual({'obfs2': ('83.212.96.201', 33570, [])}, desc.transport)
    self.assertEqual([], desc.get_unrecognized_lines())

    # multiple transport lines
    desc = BridgeExtraInfoDescriptor.create({'transport': 'obfs3\ntransport obfs4'})
    self.assertEqual({'obfs3': (None, None, None), 'obfs4': (None, None, None)}, desc.transport)
    self.assertEqual([], desc.get_unrecognized_lines())
| lgpl-3.0 |
ossdemura/django-miniblog | src/Lib/site-packages/pip/commands/wheel.py | 341 | 7729 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import warnings
from pip.basecommand import RequirementCommand
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import RequirementSet
from pip.utils import import_or_raise
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip10Warning
from pip.wheel import WheelCache, WheelBuilder
from pip import cmdoptions
logger = logging.getLogger(__name__)
class WheelCommand(RequirementCommand):
    """
    Build Wheel archives for your requirements and dependencies.

    Wheel is a built-package format, and offers the advantage of not
    recompiling your software during every install. For more details, see the
    wheel docs: https://wheel.readthedocs.io/en/latest/

    Requirements: setuptools>=0.8, and wheel.

    'pip wheel' uses the bdist_wheel setuptools extension from the wheel
    package to build individual wheels.
    """

    name = 'wheel'
    usage = """
      %prog [options] <requirement specifier> ...
      %prog [options] -r <requirements file> ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""
    summary = 'Build wheels from your requirements.'

    def __init__(self, *args, **kw):
        """Register the command-specific and shared index options."""
        super(WheelCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        # where the built wheels are written
        cmd_opts.add_option(
            '-w', '--wheel-dir',
            dest='wheel_dir',
            metavar='dir',
            default=os.curdir,
            help=("Build wheels into <dir>, where the default is the "
                  "current working directory."),
        )
        # binary/source format-control options shared with 'pip install'
        cmd_opts.add_option(cmdoptions.use_wheel())
        cmd_opts.add_option(cmdoptions.no_use_wheel())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(
            '--build-option',
            dest='build_options',
            metavar='options',
            action='append',
            help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(cmdoptions.ignore_requires_python())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(
            '--global-option',
            dest='global_options',
            action='append',
            metavar='options',
            help="Extra global options to be supplied to the setup.py "
            "call before the 'bdist_wheel' command.")
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )
        cmd_opts.add_option(cmdoptions.no_clean())
        cmd_opts.add_option(cmdoptions.require_hashes())

        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        # insertion order determines the help output ordering
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def check_required_packages(self):
        """Raise CommandError unless 'wheel' and a dist-info capable
        setuptools (>= 0.8) are importable."""
        import_or_raise(
            'wheel.bdist_wheel',
            CommandError,
            "'pip wheel' requires the 'wheel' package. To fix this, run: "
            "pip install wheel"
        )
        pkg_resources = import_or_raise(
            'pkg_resources',
            CommandError,
            "'pip wheel' requires setuptools >= 0.8 for dist-info support."
            " To fix this, run: pip install --upgrade setuptools"
        )
        # DistInfoDistribution only exists in setuptools >= 0.8
        if not hasattr(pkg_resources, 'DistInfoDistribution'):
            raise CommandError(
                "'pip wheel' requires setuptools >= 0.8 for dist-info "
                "support. To fix this, run: pip install --upgrade "
                "setuptools"
            )

    def run(self, options, args):
        """Resolve the requirements and build a wheel for each of them."""
        self.check_required_packages()
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)

        # these three flags are accepted but no longer do anything
        if options.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []

        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)

        options.src_dir = os.path.abspath(options.src_dir)

        with self._build_session(options) as session:
            finder = self._build_package_finder(options, session)
            # only auto-delete the build dir when it's a temp dir we created
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)
            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=None,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_installed=True,
                    ignore_requires_python=options.ignore_requires_python,
                    isolated=options.isolated_mode,
                    session=session,
                    wheel_cache=wheel_cache,
                    wheel_download_dir=options.wheel_dir,
                    require_hashes=options.require_hashes
                )

                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )

                if not requirement_set.has_requirements:
                    return

                try:
                    # build wheels
                    wb = WheelBuilder(
                        requirement_set,
                        finder,
                        build_options=options.build_options or [],
                        global_options=options.global_options or [],
                    )
                    if not wb.build():
                        raise CommandError(
                            "Failed to build one or more wheels"
                        )
                except PreviousBuildDirError:
                    # keep the build dir around so the user can inspect it
                    options.no_clean = True
                    raise
                finally:
                    if not options.no_clean:
                        requirement_set.cleanup_files()
| mit |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/json_schema_compiler/util_cc_helper.py | 31 | 1498 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
_API_UTIL_NAMESPACE = 'json_schema_compiler::util'
class UtilCCHelper(object):
  """A util class that generates code that uses
  tools/json_schema_compiler/util.cc.
  """

  def __init__(self, type_manager):
    self._type_manager = type_manager

  def PopulateArrayFromListFunction(self, optional):
    """Returns the function to turn a list into a vector.
    """
    if optional:
      function_name = 'PopulateOptionalArrayFromList'
    else:
      function_name = 'PopulateArrayFromList'
    return '%s::%s' % (_API_UTIL_NAMESPACE, function_name)

  def CreateValueFromArray(self, src, optional):
    """Generates code to create a scoped_pt<Value> from the array at src.

    |src| The variable to convert, either a vector or scoped_ptr<vector>.
    |optional| Whether |type_| was optional. Optional types are pointers so
    must be treated differently.
    """
    function_name = 'CreateValueFromOptionalArray' if optional else 'CreateValueFromArray'
    return '%s::%s(%s)' % (_API_UTIL_NAMESPACE, function_name, src)

  def GetIncludePath(self):
    """Returns the include directive for util.h."""
    return '#include "tools/json_schema_compiler/util.h"'

  def GetValueTypeString(self, value, is_ptr=False):
    """Returns a ValueTypeToString() call for |value|, using '->' access
    when |value| is a pointer and '.' otherwise."""
    accessor = '->GetType()' if is_ptr else '.GetType()'
    return 'json_schema_compiler::util::ValueTypeToString(%s%s)' % (value, accessor)
| mit |
anntzer/scikit-learn | sklearn/linear_model/_passive_aggressive.py | 2 | 17363 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from ..utils.validation import _deprecate_positional_args
from ._stochastic_gradient import BaseSGDClassifier
from ._stochastic_gradient import BaseSGDRegressor
from ._stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float, default=1.0
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float or None, default=1e-3
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol).
.. versionadded:: 0.19
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation.
score is not improving. If set to True, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score is not improving by at least tol for
n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : integer, default=0
The verbosity level
loss : string, default="hinge"
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
n_jobs : int or None, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Used to shuffle the training data, when ``shuffle`` is set to
``True``. Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
class_weight : dict, {class_label: weight} or "balanced" or None, \
default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
average : bool or int, default=False
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
classes_ : array of shape (n_classes,)
The unique classes labels.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
loss_function_ : callable
Loss function used by the algorithm.
Examples
--------
>>> from sklearn.linear_model import PassiveAggressiveClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0,
... tol=1e-3)
>>> clf.fit(X, y)
PassiveAggressiveClassifier(random_state=0)
>>> print(clf.coef_)
[[0.26642044 0.45070924 0.67251877 0.64185414]]
>>> print(clf.intercept_)
[1.84127814]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
See Also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
@_deprecate_positional_args
def __init__(self, *, C=1.0, fit_intercept=True, max_iter=1000, tol=1e-3,
             early_stopping=False, validation_fraction=0.1,
             n_iter_no_change=5, shuffle=True, verbose=0, loss="hinge",
             n_jobs=None, random_state=None, warm_start=False,
             class_weight=None, average=False):
    # Passive-aggressive updates have no regularization penalty and a fixed
    # base learning rate (eta0=1.0); aggressiveness is capped by C instead,
    # which is stored here and consumed by fit/partial_fit.
    super().__init__(
        penalty=None,
        fit_intercept=fit_intercept,
        max_iter=max_iter,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        n_iter_no_change=n_iter_no_change,
        shuffle=shuffle,
        verbose=verbose,
        random_state=random_state,
        eta0=1.0,
        warm_start=warm_start,
        class_weight=class_weight,
        average=average,
        n_jobs=n_jobs)

    self.C = C
    self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, max_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
self._validate_params()
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
    """Passive Aggressive Regressor
    Read more in the :ref:`User Guide <passive_aggressive>`.
    Parameters
    ----------
    C : float, default=1.0
        Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.
    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`partial_fit` method.
        .. versionadded:: 0.19
    tol : float or None, default=1e-3
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol).
        .. versionadded:: 0.19
    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a fraction of training data as validation and terminate
        training when validation score is not improving by at least tol for
        n_iter_no_change consecutive epochs.
        .. versionadded:: 0.20
    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.
        .. versionadded:: 0.20
    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.
        .. versionadded:: 0.20
    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.
    verbose : integer, default=0
        The verbosity level
    loss : string, default="epsilon_insensitive"
        The loss function to be used:
        epsilon_insensitive: equivalent to PA-I in the reference paper.
        squared_epsilon_insensitive: equivalent to PA-II in the reference
        paper.
    epsilon : float, default=0.1
        If the difference between the current prediction and the correct label
        is below this threshold, the model is not updated.
    random_state : int, RandomState instance, default=None
        Used to shuffle the training data, when ``shuffle`` is set to
        ``True``. Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.
    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.
    average : bool or int, default=False
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.
        .. versionadded:: 0.19
            parameter *average* to use weights averaging in SGD
    Attributes
    ----------
    coef_ : array, shape = [n_features]
        Weights assigned to the features.
    intercept_ : array, shape = [1]
        Constants in decision function.
    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples)``.
    Examples
    --------
    >>> from sklearn.linear_model import PassiveAggressiveRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, random_state=0)
    >>> regr = PassiveAggressiveRegressor(max_iter=100, random_state=0,
    ... tol=1e-3)
    >>> regr.fit(X, y)
    PassiveAggressiveRegressor(max_iter=100, random_state=0)
    >>> print(regr.coef_)
    [20.48736655 34.18818427 67.59122734 87.94731329]
    >>> print(regr.intercept_)
    [-0.02306214]
    >>> print(regr.predict([[0, 0, 0, 0]]))
    [-0.02306214]
    See Also
    --------
    SGDRegressor
    References
    ----------
    Online Passive-Aggressive Algorithms
    <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
    """

    @_deprecate_positional_args
    def __init__(self, *, C=1.0, fit_intercept=True, max_iter=1000, tol=1e-3,
                 early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, shuffle=True, verbose=0,
                 loss="epsilon_insensitive", epsilon=DEFAULT_EPSILON,
                 random_state=None, warm_start=False,
                 average=False):
        # Delegate everything except the PA-specific C and loss to the
        # SGD base class; PA uses no penalty and a fixed base rate.
        super().__init__(
            penalty=None,  # PA applies no regularization penalty
            l1_ratio=0,
            epsilon=epsilon,
            eta0=1.0,  # fixed base rate expected by the SGD base class
            fit_intercept=fit_intercept,
            max_iter=max_iter,
            tol=tol,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change,
            shuffle=shuffle,
            verbose=verbose,
            random_state=random_state,
            warm_start=warm_start,
            average=average)
        # scikit-learn convention: __init__ only stores hyper-parameters.
        self.C = C
        self.loss = loss

    def partial_fit(self, X, y):
        """Fit linear model with Passive Aggressive algorithm.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Subset of training data
        y : numpy array of shape [n_samples]
            Subset of target values
        Returns
        -------
        self : returns an instance of self.
        """
        self._validate_params(for_partial_fit=True)
        # epsilon_insensitive -> PA-I ("pa1"); squared variant -> PA-II ("pa2").
        lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
        return self._partial_fit(X, y, alpha=1.0, C=self.C,
                                 loss="epsilon_insensitive",
                                 learning_rate=lr, max_iter=1,
                                 sample_weight=None,
                                 coef_init=None, intercept_init=None)

    def fit(self, X, y, coef_init=None, intercept_init=None):
        """Fit linear model with Passive Aggressive algorithm.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data
        y : numpy array of shape [n_samples]
            Target values
        coef_init : array, shape = [n_features]
            The initial coefficients to warm-start the optimization.
        intercept_init : array, shape = [1]
            The initial intercept to warm-start the optimization.
        Returns
        -------
        self : returns an instance of self.
        """
        self._validate_params()
        # epsilon_insensitive -> PA-I ("pa1"); squared variant -> PA-II ("pa2").
        lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
        return self._fit(X, y, alpha=1.0, C=self.C,
                         loss="epsilon_insensitive",
                         learning_rate=lr,
                         coef_init=coef_init,
                         intercept_init=intercept_init)
| bsd-3-clause |
wnt-zhp/hufce | django/contrib/formtools/tests/__init__.py | 78 | 16958 | import os
import re
import warnings
from django import http
from django.conf import settings
from django.contrib.formtools import preview, utils
from django.contrib.formtools.wizard import FormWizard
from django.test import TestCase
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils import unittest
from django.contrib.formtools.tests.wizard import *
from django.contrib.formtools.tests.forms import *
warnings.filterwarnings('ignore', category=PendingDeprecationWarning,
module='django.contrib.formtools.wizard')
success_string = "Done was called!"
class TestFormPreview(preview.FormPreview):
    """FormPreview subclass the preview tests drive through the URLconf."""

    def get_context(self, request, form):
        # Extend the default template context with a marker the tests assert on.
        ctx = super(TestFormPreview, self).get_context(request, form)
        ctx['custom_context'] = True
        return ctx

    def get_initial(self, request):
        # Pre-populate field1 so tests can assert on the initial form data.
        return {'field1': 'Works!'}

    def done(self, request, cleaned_data):
        # Signal successful completion; tests compare the response body
        # against this module-level marker string.
        return http.HttpResponse(success_string)
class FormToolsTestCase(TestCase):
    """Base test case that temporarily adds this package's templates dir.

    The preview/wizard templates need base.html, which the test tree keeps
    under templates/tests/; setUp appends that directory to TEMPLATE_DIRS
    and tearDown restores the original setting.
    """
    def setUp(self):
        # in the test runner use templates/tests/ to provide base.html
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = list(settings.TEMPLATE_DIRS) + [
            os.path.join(os.path.dirname(__file__), 'templates')]
    def tearDown(self):
        # Restore the original setting so other test cases are unaffected.
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
class PreviewTests(FormToolsTestCase):
urls = 'django.contrib.formtools.tests.urls'
def setUp(self):
super(PreviewTests, self).setUp()
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.formtools.utils')
# Create a FormPreview instance to share between tests
self.preview = preview.FormPreview(TestForm)
input_template = '<input type="hidden" name="%s" value="%s" />'
self.input = input_template % (self.preview.unused_name('stage'), "%d")
self.test_data = {'field1':u'foo', 'field1_':u'asdf'}
def tearDown(self):
super(PreviewTests, self).tearDown()
self.restore_warnings_state()
    def test_unused_name(self):
        """
        Verifies name mangling to get a unique field name.
        """
        # 'field1' collides with an existing form field; unused_name
        # presumably appends underscores until the name no longer collides.
        self.assertEqual(self.preview.unused_name('field1'), 'field1__')
def test_form_get(self):
"""
Test contrib.formtools.preview form retrieval.
Use the client library to see if we can sucessfully retrieve
the form (mostly testing the setup ROOT_URLCONF
process). Verify that an additional hidden input field
is created to manage the stage.
"""
response = self.client.get('/preview/')
stage = self.input % 1
self.assertContains(response, stage, 1)
self.assertEqual(response.context['custom_context'], True)
self.assertEqual(response.context['form'].initial, {'field1': 'Works!'})
def test_form_preview(self):
"""
Test contrib.formtools.preview form preview rendering.
Use the client library to POST to the form to see if a preview
is returned. If we do get a form back check that the hidden
value is correctly managing the state of the form.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage': 1})
response = self.client.post('/preview/', self.test_data)
# Check to confirm stage is set to 2 in output form.
stage = self.input % 2
self.assertContains(response, stage, 1)
def test_form_submit(self):
"""
Test contrib.formtools.preview form submittal.
Use the client library to POST to the form with stage set to 3
to see if our forms done() method is called. Check first
without the security hash, verify failure, retry with security
hash and verify sucess.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage':2})
response = self.client.post('/preview/', self.test_data)
self.assertNotEqual(response.content, success_string)
hash = self.preview.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash': hash})
response = self.client.post('/preview/', self.test_data)
self.assertEqual(response.content, success_string)
def test_bool_submit(self):
"""
Test contrib.formtools.preview form submittal when form contains:
BooleanField(required=False)
Ticket: #6209 - When an unchecked BooleanField is previewed, the preview
form's hash would be computed with no value for ``bool1``. However, when
the preview form is rendered, the unchecked hidden BooleanField would be
rendered with the string value 'False'. So when the preview form is
resubmitted, the hash would be computed with the value 'False' for
``bool1``. We need to make sure the hashes are the same in both cases.
"""
self.test_data.update({'stage':2})
hash = self.preview.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash':hash, 'bool1':u'False'})
response = self.client.post('/preview/', self.test_data)
self.assertEqual(response.content, success_string)
def test_form_submit_good_hash(self):
"""
Test contrib.formtools.preview form submittal, using a correct
hash
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage':2})
response = self.client.post('/preview/', self.test_data)
self.assertNotEqual(response.content, success_string)
hash = utils.form_hmac(TestForm(self.test_data))
self.test_data.update({'hash': hash})
response = self.client.post('/preview/', self.test_data)
self.assertEqual(response.content, success_string)
def test_form_submit_bad_hash(self):
"""
Test contrib.formtools.preview form submittal does not proceed
if the hash is incorrect.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage':2})
response = self.client.post('/preview/', self.test_data)
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.content, success_string)
hash = utils.form_hmac(TestForm(self.test_data)) + "bad"
self.test_data.update({'hash': hash})
response = self.client.post('/previewpreview/', self.test_data)
self.assertNotEqual(response.content, success_string)
class SecurityHashTests(unittest.TestCase):
def setUp(self):
self._warnings_state = get_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.formtools.utils')
def tearDown(self):
restore_warnings_state(self._warnings_state)
def test_textfield_hash(self):
"""
Regression test for #10034: the hash generation function should ignore
leading/trailing whitespace so as to be friendly to broken browsers that
submit it (usually in textareas).
"""
f1 = HashTestForm({'name': 'joe', 'bio': 'Nothing notable.'})
f2 = HashTestForm({'name': ' joe', 'bio': 'Nothing notable. '})
hash1 = utils.security_hash(None, f1)
hash2 = utils.security_hash(None, f2)
self.assertEqual(hash1, hash2)
def test_empty_permitted(self):
"""
Regression test for #10643: the security hash should allow forms with
empty_permitted = True, or forms where data has not changed.
"""
f1 = HashTestBlankForm({})
f2 = HashTestForm({}, empty_permitted=True)
hash1 = utils.security_hash(None, f1)
hash2 = utils.security_hash(None, f2)
self.assertEqual(hash1, hash2)
class FormHmacTests(unittest.TestCase):
"""
Same as SecurityHashTests, but with form_hmac
"""
def test_textfield_hash(self):
"""
Regression test for #10034: the hash generation function should ignore
leading/trailing whitespace so as to be friendly to broken browsers that
submit it (usually in textareas).
"""
f1 = HashTestForm({'name': 'joe', 'bio': 'Nothing notable.'})
f2 = HashTestForm({'name': ' joe', 'bio': 'Nothing notable. '})
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertEqual(hash1, hash2)
def test_empty_permitted(self):
"""
Regression test for #10643: the security hash should allow forms with
empty_permitted = True, or forms where data has not changed.
"""
f1 = HashTestBlankForm({})
f2 = HashTestForm({}, empty_permitted=True)
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertEqual(hash1, hash2)
#
# FormWizard tests
#
class TestWizardClass(FormWizard):
def get_template(self, step):
return 'forms/wizard.html'
def done(self, request, cleaned_data):
return http.HttpResponse(success_string)
class DummyRequest(http.HttpRequest):
    """Minimal HttpRequest stand-in for invoking a wizard without the client.

    Passing a ``POST`` dict makes the request a POST carrying that data;
    omitting it yields a bare GET.
    """

    def __init__(self, POST=None):
        super(DummyRequest, self).__init__()
        # FIX: the original used the `POST and "POST" or "GET"` idiom, which
        # reported "GET" for an *empty* POST dict even though the payload was
        # still applied below. Key the method off the same `is not None`
        # check so the two stay consistent.
        self.method = "POST" if POST is not None else "GET"
        if POST is not None:
            self.POST.update(POST)
        # Wizard views are CSRF protected; bypass the middleware check here.
        self._dont_enforce_csrf_checks = True
class WizardTests(FormToolsTestCase):
urls = 'django.contrib.formtools.tests.urls'
input_re = re.compile('name="([^"]+)" value="([^"]+)"')
wizard_step_data = (
{
'0-name': 'Pony',
'0-thirsty': '2',
},
{
'1-address1': '123 Main St',
'1-address2': 'Djangoland',
},
{
'2-random_crap': 'blah blah',
}
)
def setUp(self):
super(WizardTests, self).setUp()
# Use a known SECRET_KEY to make security_hash tests deterministic
self.old_SECRET_KEY = settings.SECRET_KEY
settings.SECRET_KEY = "123"
def tearDown(self):
super(WizardTests, self).tearDown()
settings.SECRET_KEY = self.old_SECRET_KEY
def test_step_starts_at_zero(self):
"""
step should be zero for the first form
"""
response = self.client.get('/wizard1/')
self.assertEqual(0, response.context['step0'])
def test_step_increments(self):
"""
step should be incremented when we go to the next page
"""
response = self.client.post('/wizard1/', {"0-field":"test", "wizard_step":"0"})
self.assertEqual(1, response.context['step0'])
def test_bad_hash(self):
"""
Form should not advance if the hash is missing or bad
"""
response = self.client.post('/wizard1/',
{"0-field":"test",
"1-field":"test2",
"wizard_step": "1"})
self.assertEqual(0, response.context['step0'])
def test_good_hash(self):
"""
Form should advance if the hash is present and good, as calculated using
current method.
"""
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "7e9cea465f6a10a6fb47fcea65cb9a76350c9a5c",
"wizard_step": "1"}
response = self.client.post('/wizard1/', data)
self.assertEqual(2, response.context['step0'])
def test_11726(self):
"""
Regression test for ticket #11726.
Wizard should not raise Http404 when steps are added dynamically.
"""
reached = [False]
that = self
class WizardWithProcessStep(TestWizardClass):
def process_step(self, request, form, step):
if step == 0:
if self.num_steps() < 2:
self.form_list.append(WizardPageTwoForm)
if step == 1:
that.assertTrue(isinstance(form, WizardPageTwoForm))
reached[0] = True
wizard = WizardWithProcessStep([WizardPageOneForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "7e9cea465f6a10a6fb47fcea65cb9a76350c9a5c",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
self.assertTrue(reached[0])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "7e9cea465f6a10a6fb47fcea65cb9a76350c9a5c",
"hash_1": "d5b434e3934cc92fee4bd2964c4ebc06f81d362d",
"wizard_step": "2"}
self.assertRaises(http.Http404, wizard, DummyRequest(POST=data))
def test_14498(self):
"""
Regression test for ticket #14498. All previous steps' forms should be
validated.
"""
reached = [False]
that = self
class WizardWithProcessStep(TestWizardClass):
def process_step(self, request, form, step):
that.assertTrue(hasattr(form, 'cleaned_data'))
reached[0] = True
wizard = WizardWithProcessStep([WizardPageOneForm,
WizardPageTwoForm,
WizardPageThreeForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "7e9cea465f6a10a6fb47fcea65cb9a76350c9a5c",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
self.assertTrue(reached[0])
def test_14576(self):
"""
Regression test for ticket #14576.
The form of the last step is not passed to the done method.
"""
reached = [False]
that = self
class Wizard(TestWizardClass):
def done(self, request, form_list):
reached[0] = True
that.assertTrue(len(form_list) == 2)
wizard = Wizard([WizardPageOneForm,
WizardPageTwoForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "7e9cea465f6a10a6fb47fcea65cb9a76350c9a5c",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
self.assertTrue(reached[0])
def test_15075(self):
"""
Regression test for ticket #15075. Allow modifying wizard's form_list
in process_step.
"""
reached = [False]
that = self
class WizardWithProcessStep(TestWizardClass):
def process_step(self, request, form, step):
if step == 0:
self.form_list[1] = WizardPageTwoAlternativeForm
if step == 1:
that.assertTrue(isinstance(form, WizardPageTwoAlternativeForm))
reached[0] = True
wizard = WizardWithProcessStep([WizardPageOneForm,
WizardPageTwoForm,
WizardPageThreeForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "7e9cea465f6a10a6fb47fcea65cb9a76350c9a5c",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
self.assertTrue(reached[0])
    def grab_field_data(self, response):
        """
        Pull the appropriate field data from the context to pass to the next wizard step
        """
        previous_fields = response.context['previous_fields']
        # Seed with the current step; hidden-input name/value pairs from the
        # rendered previous_fields markup are added by the regex scan below.
        fields = {'wizard_step': response.context['step0']}
        def grab(m):
            # Record each matched name/value pair; the replacement string is
            # irrelevant -- sub() is used only to iterate over every match.
            fields[m.group(1)] = m.group(2)
            return ''
        self.input_re.sub(grab, previous_fields)
        return fields
def check_wizard_step(self, response, step_no):
"""
Helper function to test each step of the wizard
- Make sure the call succeeded
- Make sure response is the proper step number
- return the result from the post for the next step
"""
step_count = len(self.wizard_step_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Step %d of %d' % (step_no, step_count))
data = self.grab_field_data(response)
data.update(self.wizard_step_data[step_no - 1])
return self.client.post('/wizard2/', data)
def test_9473(self):
response = self.client.get('/wizard2/')
for step_no in range(1, len(self.wizard_step_data) + 1):
response = self.check_wizard_step(response, step_no)
| gpl-3.0 |
akarol/cfme_tests | cfme/cloud/provider/azure.py | 2 | 2877 | from wrapanapi.msazure import AzureSystem
from cfme.common.provider import DefaultEndpoint, DefaultEndpointForm
from cfme.utils.version import pick
from . import CloudProvider
class AzureEndpoint(DefaultEndpoint):
    """
    represents default Azure endpoint (Add/Edit dialogs)
    """
    @property
    def view_value_mapping(self):
        # No endpoint-specific fields to fill in the Add/Edit view for Azure;
        # credentials are presumably handled by DefaultEndpoint -- confirm.
        return {}
class AzureEndpointForm(DefaultEndpointForm):
"""
represents default Azure endpoint form in UI (Add/Edit dialogs)
"""
pass
class AzureProvider(CloudProvider):
"""
BaseProvider->CloudProvider->AzureProvider class.
represents CFME provider and operations available in UI
"""
catalog_name = "Azure"
type_name = "azure"
mgmt_class = AzureSystem
db_types = ["Azure::CloudManager"]
endpoints_form = AzureEndpointForm
discover_name = "Azure"
def __init__(self, name=None, endpoints=None, zone=None, key=None, region=None,
tenant_id=None, subscription_id=None, appliance=None):
super(AzureProvider, self).__init__(name=name, endpoints=endpoints,
zone=zone, key=key, appliance=appliance)
self.region = region # Region can be a string or a dict for version pick
self.tenant_id = tenant_id
self.subscription_id = subscription_id
@property
def view_value_mapping(self):
"""Maps values to view attrs"""
region = pick(self.region) if isinstance(self.region, dict) else self.region
return {
'name': self.name,
'prov_type': 'Azure',
'region': region,
'tenant_id': self.tenant_id,
'subscription': getattr(self, 'subscription_id', None)
}
def deployment_helper(self, deploy_args):
""" Used in utils.virtual_machines """
return self.data['provisioning']
    @classmethod
    def from_config(cls, prov_config, prov_key, appliance=None):
        """Build an AzureProvider from a provider-config mapping.

        Args:
            prov_config: mapping with at least ``name``, ``tenant_id``,
                ``subscription_id`` and an ``endpoints['default']`` section;
                ``region`` is optional.
            prov_key: key identifying this provider in the configuration.
            appliance: appliance the provider belongs to (optional).

        Returns:
            A configured :class:`AzureProvider` instance.
        """
        endpoint = AzureEndpoint(**prov_config['endpoints']['default'])
        # HACK: stray domain entry in credentials, so ensure it is not there
        endpoint.credentials.domain = None
        return cls(
            name=prov_config['name'],
            region=prov_config.get('region'),
            tenant_id=prov_config['tenant_id'],
            subscription_id=prov_config['subscription_id'],
            endpoints={endpoint.name: endpoint},
            key=prov_key,
            appliance=appliance)
@staticmethod
def discover_dict(credential):
"""Returns the discovery credentials dictionary"""
return {
'client_id': getattr(credential, 'principal', None),
'client_key': getattr(credential, 'secret', None),
'tenant_id': getattr(credential, 'tenant_id', None),
'subscription': getattr(credential, 'subscription_id', None)
}
| gpl-2.0 |
vit-/telegram-uz-bot | uz/tests/interface/telegram/test_bot.py | 1 | 5489 | import time
from datetime import datetime
import mock
import pytest
from uz.tests import Awaitable
from uz.interface.telegram import bot
from uz.scanner import UknkownScanID
CHAT_ID = 'chat_id'


def tg_message(text):
    """Build a minimal Telegram-update payload carrying *text*.

    The shape mirrors what the bot receives from Telegram: a private chat
    with a fixed id, a stub sender, and a loosely unique message_id.
    """
    message = {
        'chat': {'id': CHAT_ID, 'type': 'private'},
        'from': {'first_name': 'n/a', 'id': 'user_id'},
        'message_id': int(time.time()),  # unique-ish id per call
        'text': text,
    }
    return message
def get_reply(send_message_mock):
    """Return the reply text (second positional arg) of the first
    ``send_message`` call recorded on the given mock."""
    first_call_args = send_message_mock.call_args_list[0][0]
    return first_call_args[1]
@pytest.mark.asyncio
async def test_list_trains(source_station, destination_station, train):
bot.send_message = send_message = mock.MagicMock(return_value=Awaitable())
date = datetime(2016, 7, 21)
command = '/trains {} {} {}'.format(
date.strftime('%Y-%m-%d'), source_station.title, destination_station.title)
with mock.patch('uz.interface.serializer.Deserializer.load',
return_value=Awaitable((date, source_station, destination_station))) as load, \
mock.patch('uz.client.client.UZClient.list_trains',
return_value=Awaitable([train])) as list_trains:
await bot._process_message(tg_message(command))
load.assert_called_once_with({
'date': date.strftime('%Y-%m-%d'),
'source': source_station.title,
'destination': destination_station.title})
list_trains.assert_called_once_with(date, source_station, destination_station)
msg = get_reply(send_message)
title = 'Trains from %s to %s on %s:' % (
source_station, destination_station, date.date())
assert msg.startswith(title)
assert train.info() in msg
@pytest.mark.asyncio
@pytest.mark.parametrize('is_ok', [True, False])
async def test_status(is_ok):
scan_id = 'id1234'
scanner = mock.MagicMock()
if is_ok:
scanner.status.return_value = (attempts, error) = (10, 'i am error')
else:
scanner.status.side_effect = UknkownScanID()
bot.send_message = send_message = mock.MagicMock(return_value=Awaitable())
bot.set_scanner(scanner)
await bot._process_message(tg_message('/status_{}'.format(scan_id)))
scanner.status.assert_called_once_with(scan_id)
if is_ok:
send_message.assert_called_once_with(
CHAT_ID, 'No attempts: {}\nLast error message: {}'.format(attempts, error))
else:
send_message.assert_called_once_with(
CHAT_ID, 'Unknown scan id: {}'.format(scan_id))
@pytest.mark.asyncio
@pytest.mark.parametrize('is_ok', [True, False])
async def test_abort_scan(is_ok):
scan_id = 'id4321'
scanner = mock.MagicMock()
if is_ok:
scanner.abort.return_value = True
else:
scanner.abort.side_effect = UknkownScanID()
bot.send_message = send_message = mock.MagicMock(return_value=Awaitable())
bot.set_scanner(scanner)
await bot._process_message(tg_message('/abort_{}'.format(scan_id)))
scanner.abort.assert_called_once_with(scan_id)
if is_ok:
send_message.assert_called_once_with(
CHAT_ID, 'OK')
else:
send_message.assert_called_once_with(
CHAT_ID, 'Unknown scan id: {}'.format(scan_id))
@pytest.mark.asyncio
@pytest.mark.parametrize('ct_letter', [None, 'C2'])
async def test_scan(source_station, destination_station, ct_letter):
scan_id = 'id1234'
date = datetime(2016, 10, 7)
train_num = '744K'
firstname = 'username'
lastname = 'surname'
parts = [
'/scan',
firstname,
lastname,
date.strftime('%Y-%m-%d'),
source_station,
destination_station,
train_num]
if ct_letter:
parts.append(ct_letter)
command = ' '.join(str(i) for i in parts)
scanner = mock.MagicMock()
scanner.add_item.return_value = scan_id
bot.send_message = send_message = mock.MagicMock(return_value=Awaitable())
bot.set_scanner(scanner)
with mock.patch('uz.interface.serializer.Deserializer.load',
return_value=Awaitable((date, source_station, destination_station))) as load:
await bot._process_message(tg_message(command))
load.assert_called_once_with({
'firstname': firstname,
'lastname': lastname,
'date': date.strftime('%Y-%m-%d'),
'source': source_station.title,
'destination': destination_station.title,
'train_num': train_num,
'ct_letter': ct_letter})
scanner.add_item.assert_called_once_with(
mock.ANY, firstname, lastname, date, source_station, destination_station,
train_num, ct_letter)
expected = ('Scanning tickets for train {train} from {src} to {dst} on {date}.\n'
'To monitor scan status: /status_{sid}\n'
'To abort scan: /abort_{sid}').format(
train=train_num,
src=source_station,
dst=destination_station,
date=date.date(),
sid=scan_id)
send_message.assert_called_once_with(CHAT_ID, expected)
@pytest.mark.asyncio
async def test_hello():
bot.send_message = send_message = mock.MagicMock(return_value=Awaitable())
await bot._process_message(tg_message('hi'))
send_message.assert_called_once_with(CHAT_ID, mock.ANY)
@pytest.mark.asyncio
async def test_help_msg():
bot.send_message = send_message = mock.MagicMock(return_value=Awaitable())
await bot._process_message(tg_message('/help'))
send_message.assert_called_once_with(CHAT_ID, mock.ANY)
| mit |
EmmanuelJohnson/ssquiz | flask/lib/python2.7/site-packages/babel/messages/pofile.py | 136 | 16320 | # -*- coding: utf-8 -*-
"""
babel.messages.pofile
~~~~~~~~~~~~~~~~~~~~~
Reading and writing of files in the ``gettext`` PO (portable object)
format.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import os
import re
from babel.messages.catalog import Catalog, Message
from babel.util import wraptext
from babel._compat import text_type
def unescape(string):
    r"""Reverse `escape` the given string.

    Strips the surrounding double quotes and expands the backslash escapes
    ``\n``, ``\t``, ``\r``, ``\\`` and ``\"`` back into the characters they
    stand for.

    :param string: the string to unescape
    """
    replacements = {'n': '\n', 't': '\t', 'r': '\r'}

    def expand(match):
        escaped = match.group(1)
        # Backslash and double quote simply map to themselves.
        return replacements.get(escaped, escaped)

    return re.compile(r'\\([\\trn"])').sub(expand, string[1:-1])


def denormalize(string):
    r"""Reverse the normalization done by the `normalize` function.

    A normalized PO string is either a single quoted line, or several quoted
    lines (optionally led by an empty ``""`` marker) that concatenate into
    one logical string; each line is unescaped and the results are joined.

    :param string: the string to denormalize
    """
    if '\n' not in string:
        return unescape(string)
    pieces = string.splitlines()
    if string.startswith('""'):
        # Drop the leading empty-string marker of multi-line entries.
        pieces = pieces[1:]
    return ''.join(unescape(piece) for piece in pieces)
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None):
    """Read messages from a ``gettext`` PO (portable object) file from the given
    file-like object and return a `Catalog`.

    >>> from datetime import datetime
    >>> from StringIO import StringIO
    >>> buf = StringIO('''
    ... #: main.py:1
    ... #, fuzzy, python-format
    ... msgid "foo %(name)s"
    ... msgstr "quux %(name)s"
    ...
    ... # A user comment
    ... #. An auto comment
    ... #: main.py:3
    ... msgid "bar"
    ... msgid_plural "baz"
    ... msgstr[0] "bar"
    ... msgstr[1] "baaz"
    ... ''')
    >>> catalog = read_po(buf)
    >>> catalog.revision_date = datetime(2007, 04, 01)

    >>> for message in catalog:
    ...     if message.id:
    ...         print (message.id, message.string)
    ...         print ' ', (message.locations, message.flags)
    ...         print ' ', (message.user_comments, message.auto_comments)
    (u'foo %(name)s', u'quux %(name)s')
      ([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
      ([], [])
    ((u'bar', u'baz'), (u'bar', u'baaz'))
      ([(u'main.py', 3)], set([]))
      ([u'A user comment'], [u'An auto comment'])

    .. versionadded:: 1.0
       Added support for explicit charset argument.

    :param fileobj: the file-like object to read the PO file from
    :param locale: the locale identifier or `Locale` object, or `None`
                   if the catalog is not bound to a locale (which basically
                   means it's a template)
    :param domain: the message domain
    :param ignore_obsolete: whether to ignore obsolete messages in the input
    :param charset: the character set of the catalog.
    """
    catalog = Catalog(locale=locale, domain=domain, charset=charset)

    # Parser state. Single-element lists are used as mutable cells so the
    # nested closures below can rebind them (Python 2 has no ``nonlocal``).
    counter = [0]
    offset = [0]
    messages = []
    translations = []
    locations = []
    flags = []
    user_comments = []
    auto_comments = []
    obsolete = [False]
    context = []
    in_msgid = [False]
    in_msgstr = [False]
    in_msgctxt = [False]

    def _add_message():
        # Flush the accumulated state into a Message and reset for the next
        # entry in the file.
        translations.sort()
        if len(messages) > 1:
            # More than one msgid line means msgid + msgid_plural.
            msgid = tuple([denormalize(m) for m in messages])
        else:
            msgid = denormalize(messages[0])
        if isinstance(msgid, (list, tuple)):
            # Plural entry: pad missing msgstr[n] slots with empty strings so
            # there is one translation per plural form.
            string = []
            for idx in range(catalog.num_plurals):
                try:
                    string.append(translations[idx])
                except IndexError:
                    string.append((idx, ''))
            string = tuple([denormalize(t[1]) for t in string])
        else:
            string = denormalize(translations[0][1])
        if context:
            msgctxt = denormalize('\n'.join(context))
        else:
            msgctxt = None
        message = Message(msgid, string, list(locations), set(flags),
                          auto_comments, user_comments, lineno=offset[0] + 1,
                          context=msgctxt)
        if obsolete[0]:
            if not ignore_obsolete:
                catalog.obsolete[msgid] = message
        else:
            catalog[msgid] = message
        del messages[:]; del translations[:]; del context[:]; del locations[:];
        del flags[:]; del auto_comments[:]; del user_comments[:];
        obsolete[0] = False
        counter[0] += 1

    def _process_message_line(lineno, line):
        # Dispatch on the keyword at the start of a (non-comment) PO line.
        # NB: 'msgid_plural' must be tested before 'msgid' (prefix overlap).
        if line.startswith('msgid_plural'):
            in_msgid[0] = True
            msg = line[12:].lstrip()
            messages.append(msg)
        elif line.startswith('msgid'):
            in_msgid[0] = True
            offset[0] = lineno
            txt = line[5:].lstrip()
            if messages:
                # A new msgid terminates the previous entry.
                _add_message()
            messages.append(txt)
        elif line.startswith('msgstr'):
            in_msgid[0] = False
            in_msgstr[0] = True
            msg = line[6:].lstrip()
            if msg.startswith('['):
                # Plural form: msgstr[N] "..."
                idx, msg = msg[1:].split(']', 1)
                translations.append([int(idx), msg.lstrip()])
            else:
                translations.append([0, msg])
        elif line.startswith('msgctxt'):
            if messages:
                _add_message()
            in_msgid[0] = in_msgstr[0] = False
            context.append(line[7:].lstrip())
        elif line.startswith('"'):
            # Continuation line: append to whichever section we are inside.
            if in_msgid[0]:
                messages[-1] += u'\n' + line.rstrip()
            elif in_msgstr[0]:
                translations[-1][1] += u'\n' + line.rstrip()
            elif in_msgctxt[0]:
                context.append(line.rstrip())

    for lineno, line in enumerate(fileobj.readlines()):
        line = line.strip()
        if not isinstance(line, text_type):
            line = line.decode(catalog.charset)
        if line.startswith('#'):
            in_msgid[0] = in_msgstr[0] = False
            if messages and translations:
                _add_message()
            if line[1:].startswith(':'):
                # '#:' location comment: "filename:lineno" pairs.
                for location in line[2:].lstrip().split():
                    pos = location.rfind(':')
                    if pos >= 0:
                        try:
                            # NOTE(review): this rebinds the enumerate loop
                            # variable `lineno`; harmless here since the loop
                            # reassigns it each iteration, but worth renaming.
                            lineno = int(location[pos + 1:])
                        except ValueError:
                            continue
                        locations.append((location[:pos], lineno))
            elif line[1:].startswith(','):
                # '#,' flags comment (fuzzy, python-format, ...).
                for flag in line[2:].lstrip().split(','):
                    flags.append(flag.strip())
            elif line[1:].startswith('~'):
                # '#~' marks an obsolete entry; the payload is a normal
                # message line.
                obsolete[0] = True
                _process_message_line(lineno, line[2:].lstrip())
            elif line[1:].startswith('.'):
                # These are called auto-comments
                comment = line[2:].strip()
                if comment:  # Just check that we're not adding empty comments
                    auto_comments.append(comment)
            else:
                # These are called user comments
                user_comments.append(line[1:].strip())
        else:
            _process_message_line(lineno, line)

    if messages:
        _add_message()

    # No actual messages found, but there was some info in comments, from which
    # we'll construct an empty header message
    elif not counter[0] and (flags or user_comments or auto_comments):
        messages.append(u'')
        translations.append([0, u''])
        _add_message()

    return catalog
# Pattern used by `normalize` to find break opportunities when wrapping long
# lines; whitespace runs, hyphenated words and em-dashes all qualify.
WORD_SEP = re.compile('('
                      r'\s+|'                                 # any whitespace
                      r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|'  # hyphenated words
                      r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)'   # em-dash
                      ')')
def escape(string):
    r"""Escape the given string so that it can be included in double-quoted
    strings in ``PO`` files.

    >>> escape('''Say:
    ...   "hello, world!"
    ... ''')
    '"Say:\\n  \\"hello, world!\\"\\n"'

    :param string: the string to escape
    """
    # The backslash must be handled first so that the escape sequences
    # introduced below are not themselves re-escaped.
    escaped = string.replace('\\', '\\\\')
    for raw, quoted in (('\t', '\\t'), ('\r', '\\r'), ('\n', '\\n'), ('\"', '\\"')):
        escaped = escaped.replace(raw, quoted)
    return '"%s"' % escaped
def normalize(string, prefix='', width=76):
    r"""Convert a string into a format that is appropriate for .po files.

    >>> print normalize('''Say:
    ...   "hello, world!"
    ... ''', width=None)
    ""
    "Say:\n"
    "  \"hello, world!\"\n"

    >>> print normalize('''Say:
    ...   "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
    ... ''', width=32)
    ""
    "Say:\n"
    "  \"Lorem ipsum dolor sit "
    "amet, consectetur adipisicing"
    " elit, \"\n"

    :param string: the string to normalize
    :param prefix: a string that should be prepended to every line
    :param width: the maximum line width; use `None`, 0, or a negative number
                  to completely disable line wrapping
    """
    if width and width > 0:
        prefixlen = len(prefix)
        lines = []
        for line in string.splitlines(True):
            if len(escape(line)) + prefixlen > width:
                # Line is too long once escaped and quoted: split it at the
                # boundaries found by WORD_SEP and greedily refill.
                chunks = WORD_SEP.split(line)
                chunks.reverse()
                while chunks:
                    buf = []
                    size = 2  # account for the surrounding double quotes
                    while chunks:
                        # -2 compensates for the quotes escape() adds per chunk.
                        l = len(escape(chunks[-1])) - 2 + prefixlen
                        if size + l < width:
                            buf.append(chunks.pop())
                            size += l
                        else:
                            if not buf:
                                # handle long chunks by putting them on a
                                # separate line
                                buf.append(chunks.pop())
                            break
                    lines.append(u''.join(buf))
            else:
                lines.append(line)
    else:
        lines = string.splitlines(True)

    if len(lines) <= 1:
        # Single physical line: a plain quoted string suffices.
        return escape(string)

    # Remove empty trailing line
    if lines and not lines[-1]:
        del lines[-1]
        lines[-1] += '\n'
    # Multi-line values start with an empty '""' line, per PO convention.
    return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
             sort_output=False, sort_by_file=False, ignore_obsolete=False,
             include_previous=False):
    r"""Write a ``gettext`` PO (portable object) template file for a given
    message catalog to the provided file-like object.

    >>> catalog = Catalog()
    >>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
    ...             flags=('fuzzy',))
    <Message...>
    >>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
    <Message...>
    >>> from io import BytesIO
    >>> buf = BytesIO()
    >>> write_po(buf, catalog, omit_header=True)
    >>> print buf.getvalue()
    #: main.py:1
    #, fuzzy, python-format
    msgid "foo %(name)s"
    msgstr ""
    <BLANKLINE>
    #: main.py:3
    msgid "bar"
    msgid_plural "baz"
    msgstr[0] ""
    msgstr[1] ""
    <BLANKLINE>
    <BLANKLINE>

    :param fileobj: the file-like object to write to
    :param catalog: the `Catalog` instance
    :param width: the maximum line width for the generated output; use `None`,
                  0, or a negative number to completely disable line wrapping
    :param no_location: do not emit a location comment for every message
    :param omit_header: do not include the ``msgid ""`` entry at the top of the
                        output
    :param sort_output: whether to sort the messages in the output by msgid
    :param sort_by_file: whether to sort the messages in the output by their
                         locations
    :param ignore_obsolete: whether to ignore obsolete messages and not include
                            them in the output; by default they are included as
                            comments
    :param include_previous: include the old msgid as a comment when
                             updating the catalog
    """
    def _normalize(key, prefix=''):
        # Quote/wrap a value using the module-level `normalize`, with the
        # caller's width baked in.
        return normalize(key, prefix=prefix, width=width)

    def _write(text):
        # Encode on the way out; backslashreplace keeps the write from
        # failing on characters the catalog charset cannot represent.
        if isinstance(text, text_type):
            text = text.encode(catalog.charset, 'backslashreplace')
        fileobj.write(text)

    def _write_comment(comment, prefix=''):
        # xgettext always wraps comments even if --no-wrap is passed;
        # provide the same behaviour
        if width and width > 0:
            _width = width
        else:
            _width = 76
        for line in wraptext(comment, _width):
            _write('#%s %s\n' % (prefix, line.strip()))

    def _write_message(message, prefix=''):
        # Emit the msgctxt/msgid/msgstr lines for one entry; `prefix` is
        # '#~ ' for obsolete entries and empty otherwise.
        if isinstance(message.id, (list, tuple)):
            # Plural entry: singular + plural ids, one msgstr per plural form.
            if message.context:
                _write('%smsgctxt %s\n' % (prefix,
                                           _normalize(message.context, prefix)))
            _write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
            _write('%smsgid_plural %s\n' % (
                prefix, _normalize(message.id[1], prefix)
            ))

            for idx in range(catalog.num_plurals):
                try:
                    string = message.string[idx]
                except IndexError:
                    string = ''
                _write('%smsgstr[%d] %s\n' % (
                    prefix, idx, _normalize(string, prefix)
                ))
        else:
            if message.context:
                _write('%smsgctxt %s\n' % (prefix,
                                           _normalize(message.context, prefix)))
            _write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
            _write('%smsgstr %s\n' % (
                prefix, _normalize(message.string or '', prefix)
            ))

    messages = list(catalog)
    if sort_output:
        messages.sort()
    elif sort_by_file:
        messages.sort(lambda x,y: cmp(x.locations, y.locations))

    for message in messages:
        if not message.id:  # This is the header "message"
            if omit_header:
                continue
            comment_header = catalog.header_comment
            if width and width > 0:
                lines = []
                for line in comment_header.splitlines():
                    lines += wraptext(line, width=width,
                                      subsequent_indent='# ')
                comment_header = u'\n'.join(lines)
            _write(comment_header + u'\n')

        for comment in message.user_comments:
            _write_comment(comment)
        for comment in message.auto_comments:
            _write_comment(comment, prefix='.')

        if not no_location:
            # Locations are written with forward slashes regardless of OS.
            locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
                              for filename, lineno in message.locations])
            _write_comment(locs, prefix=':')
        if message.flags:
            _write('#%s\n' % ', '.join([''] + sorted(message.flags)))

        if message.previous_id and include_previous:
            _write_comment('msgid %s' % _normalize(message.previous_id[0]),
                           prefix='|')
            if len(message.previous_id) > 1:
                _write_comment('msgid_plural %s' % _normalize(
                    message.previous_id[1]
                ), prefix='|')

        _write_message(message)
        _write('\n')

    if not ignore_obsolete:
        # Obsolete entries are appended at the end, prefixed with '#~ '.
        for message in catalog.obsolete.values():
            for comment in message.user_comments:
                _write_comment(comment)
            _write_message(message, prefix='#~ ')
            _write('\n')
| bsd-3-clause |
kazukisona/ThinkStats2 | code/relay_soln.py | 70 | 1675 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import thinkstats2
import thinkplot
import relay
def ObservedPmf(pmf, speed, label=None):
    """Returns a new Pmf representing speeds observed at a given speed.

    The chance of observing a runner is proportional to the difference
    in speed.

    Args:
        pmf: distribution of actual speeds
        speed: speed of the observing runner
        label: string label for the new dist

    Returns:
        Pmf object
    """
    observed = pmf.Copy(label=label)
    # Reweight each speed by its distance from the observer's speed: runners
    # moving at exactly the observer's speed get weight 0 (never overtaken).
    for value in observed.Values():
        observed.Mult(value, abs(value - speed))
    observed.Normalize()
    return observed
def main():
    """Plot actual vs. observer-biased relay race speed distributions.

    Reads the relay results, bins the speeds, applies the observer bias at
    7.5 mph, and saves PMF and CDF comparison figures to disk.
    """
    results = relay.ReadResults()
    speeds = relay.GetSpeeds(results)
    # Bin speeds into 100 bins over the 3..12 mph range.
    speeds = relay.BinData(speeds, 3, 12, 100)

    # plot the distribution of actual speeds
    pmf = thinkstats2.Pmf(speeds, 'actual speeds')

    # plot the biased distribution seen by the observer
    biased = ObservedPmf(pmf, 7.5, label='observed speeds')

    thinkplot.Pmf(biased)
    thinkplot.Save(root='observed_speeds',
                   title='PMF of running speed',
                   xlabel='speed (mph)',
                   ylabel='PMF')

    cdf = thinkstats2.Cdf(pmf)
    cdf_biased = thinkstats2.Cdf(biased)

    thinkplot.PrePlot(2)
    thinkplot.Cdfs([cdf, cdf_biased])
    thinkplot.Save(root='observed_speeds_cdf',
                   title='CDF of running speed',
                   xlabel='speed (mph)',
                   ylabel='CDF')


if __name__ == '__main__':
    main()
| gpl-3.0 |
pselle/calibre | src/calibre/gui2/auto_add.py | 10 | 9782 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, tempfile, shutil, time
from threading import Thread, Event
from PyQt5.Qt import (QFileSystemWatcher, QObject, Qt, pyqtSignal, QTimer)
from calibre import prints
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.gui2 import gprefs
from calibre.gui2.dialogs.duplicates import DuplicatesQuestion
# Extensions eligible for automatic adding: everything calibre recognises as
# a book, minus a few sidecar formats (pdr/mbp/tan) excluded here.
AUTO_ADDED = frozenset(BOOK_EXTENSIONS) - {'pdr', 'mbp', 'tan'}
class AllAllowed(object):
    """Membership helper used when 'add everything' is enabled: accepts any
    extension except those the user explicitly blocked via the
    ``blocked_auto_formats`` preference."""

    def __init__(self):
        # Snapshot the block-list once; membership tests below are O(1).
        self.disallowed = frozenset(gprefs['blocked_auto_formats'])

    def __contains__(self, ext):
        return ext not in self.disallowed
class Worker(Thread):

    """Background thread that scans the watched directory, extracts metadata
    from new ebook files into per-file temp dirs, and hands the results to a
    callback (batched as {filename: tempdir})."""

    def __init__(self, path, callback):
        # path: directory to watch; callback: invoked with {fname: tdir} for
        # each batch of staged files (AutoAdder passes a Qt signal emitter).
        Thread.__init__(self)
        self.daemon = True
        self.keep_running = True
        # Event used to trigger a scan; set by AutoAdder.dir_changed().
        self.wake_up = Event()
        self.path, self.callback = path, callback
        # Files whose metadata has been read but not yet added to the db by
        # the GUI thread.
        self.staging = set()
        if gprefs['auto_add_everything']:
            self.allowed = AllAllowed()
        else:
            self.allowed = AUTO_ADDED - frozenset(gprefs['blocked_auto_formats'])

    def run(self):
        """Sleep until woken, scan once, repeat until keep_running is cleared."""
        self.tdir = PersistentTemporaryDirectory('_auto_adder')
        while self.keep_running:
            self.wake_up.wait()
            self.wake_up.clear()
            if not self.keep_running:
                break
            try:
                self.auto_add()
            except:
                import traceback
                traceback.print_exc()

    def auto_add(self):
        """Scan self.path once and stage metadata for every eligible file."""
        from calibre.utils.ipc.simple_worker import fork_job, WorkerError
        from calibre.ebooks.metadata.opf2 import metadata_to_opf
        from calibre.ebooks.metadata.meta import metadata_from_filename

        files = [x for x in os.listdir(self.path) if
                # Must not be in the process of being added to the db
                x not in self.staging
                # Firefox creates 0 byte placeholder files when downloading
                and os.stat(os.path.join(self.path, x)).st_size > 0
                # Must be a file
                and os.path.isfile(os.path.join(self.path, x))
                # Must have read and write permissions
                and os.access(os.path.join(self.path, x), os.R_OK|os.W_OK)
                # Must be a known ebook file type
                and os.path.splitext(x)[1][1:].lower() in self.allowed
                ]
        data = {}
        # Give any in progress copies time to complete
        time.sleep(2)
        for fname in files:
            f = os.path.join(self.path, fname)
            # Try opening the file for reading, if the OS prevents us, then at
            # least on windows, it means the file is open in another
            # application for writing. We will get notified by
            # QFileSystemWatcher when writing is completed, so ignore for now.
            try:
                open(f, 'rb').close()
            except:
                continue
            tdir = tempfile.mkdtemp(dir=self.tdir)
            try:
                # Read metadata in a forked process so a crashing parser
                # cannot take down this process.
                fork_job('calibre.ebooks.metadata.meta',
                        'forked_read_metadata', (f, tdir), no_output=True)
            except WorkerError as e:
                prints('Failed to read metadata from:', fname)
                prints(e.orig_tb)
            except:
                import traceback
                traceback.print_exc()

            # Ensure that the pre-metadata file size is present. If it isn't,
            # write 0 so that the file is rescanned
            szpath = os.path.join(tdir, 'size.txt')
            try:
                with open(szpath, 'rb') as f:
                    int(f.read())
            except:
                with open(szpath, 'wb') as f:
                    f.write(b'0')

            # Fall back to filename-derived metadata when the forked reader
            # produced no usable OPF (under 30 bytes is treated as failure).
            opfpath = os.path.join(tdir, 'metadata.opf')
            try:
                if os.stat(opfpath).st_size < 30:
                    raise Exception('metadata reading failed')
            except:
                mi = metadata_from_filename(fname)
                with open(opfpath, 'wb') as f:
                    f.write(metadata_to_opf(mi))

            self.staging.add(fname)
            data[fname] = tdir
        if data:
            self.callback(data)
class AutoAdder(QObject):

    """Watches a directory via QFileSystemWatcher and automatically adds any
    ebook files dropped into it to the calibre library, delegating metadata
    extraction to a background `Worker` thread."""

    # Worker -> GUI thread: carries {fname: tempdir} batches.
    metadata_read = pyqtSignal(object)
    # Emitted with the set of newly added book ids when auto-convert is on.
    auto_convert = pyqtSignal(object)

    def __init__(self, path, parent):
        # path: directory to watch (may be empty/None to disable);
        # parent: the main calibre GUI object.
        QObject.__init__(self, parent)
        if path and os.path.isdir(path) and os.access(path, os.R_OK|os.W_OK):
            self.watcher = QFileSystemWatcher(self)
            self.worker = Worker(path, self.metadata_read.emit)
            # Queued connections marshal the callbacks onto the GUI thread.
            self.watcher.directoryChanged.connect(self.dir_changed,
                    type=Qt.QueuedConnection)
            self.metadata_read.connect(self.add_to_db,
                    type=Qt.QueuedConnection)
            QTimer.singleShot(2000, self.initialize)
            self.auto_convert.connect(self.do_auto_convert,
                    type=Qt.QueuedConnection)
        elif path:
            prints(path,
                    'is not a valid directory to watch for new ebooks, ignoring')

    def initialize(self):
        """Process any files already present, then start watching the path."""
        try:
            if os.listdir(self.worker.path):
                self.dir_changed()
        except:
            pass
        self.watcher.addPath(self.worker.path)

    def dir_changed(self, *args):
        """Wake the worker (starting it lazily on first use)."""
        if os.path.isdir(self.worker.path) and os.access(self.worker.path,
                os.R_OK|os.W_OK):
            if not self.worker.is_alive():
                self.worker.start()
            self.worker.wake_up.set()

    def stop(self):
        """Ask the worker thread to exit (non-blocking; see wait())."""
        if hasattr(self, 'worker'):
            self.worker.keep_running = False
            self.worker.wake_up.set()

    def wait(self):
        """Block until the worker thread has exited."""
        if hasattr(self, 'worker'):
            self.worker.join()

    def add_to_db(self, data):
        """Add staged files (``{fname: tempdir}``) to the library database.

        Runs in the GUI thread. Handles duplicate detection/confirmation,
        cleans up temp dirs, and schedules a rescan if any file changed
        between metadata reading and adding.
        """
        from calibre.ebooks.metadata.opf2 import OPF

        gui = self.parent()
        if gui is None:
            return
        m = gui.library_view.model()
        count = 0

        needs_rescan = False
        duplicates = []
        added_ids = set()

        for fname, tdir in data.iteritems():
            paths = [os.path.join(self.worker.path, fname)]
            sz = os.path.join(tdir, 'size.txt')
            try:
                with open(sz, 'rb') as f:
                    sz = int(f.read())
                if sz != os.stat(paths[0]).st_size:
                    raise Exception('Looks like the file was written to after'
                            ' we tried to read metadata')
            except:
                # Size mismatch or unreadable marker: unstage and rescan later.
                needs_rescan = True
                try:
                    self.worker.staging.remove(fname)
                except KeyError:
                    pass

                continue

            mi = os.path.join(tdir, 'metadata.opf')
            if not os.access(mi, os.R_OK):
                continue
            mi = [OPF(open(mi, 'rb'), tdir,
                populate_spine=False).to_book_metadata()]
            dups, ids = m.add_books(paths,
                    [os.path.splitext(fname)[1][1:].upper()], mi,
                    add_duplicates=not gprefs['auto_add_check_for_duplicates'],
                    return_ids=True)
            added_ids |= set(ids)
            num = len(ids)
            if dups:
                # Copy the duplicate's file into the temp dir so the original
                # can be deleted below before the user decides what to do.
                path = dups[0][0]
                with open(os.path.join(tdir, 'dup_cache.'+dups[1][0].lower()),
                        'wb') as dest, open(path, 'rb') as src:
                    shutil.copyfileobj(src, dest)
                    dups[0][0] = dest.name
                duplicates.append(dups)

            try:
                os.remove(paths[0])
                self.worker.staging.remove(fname)
            except:
                import traceback
                traceback.print_exc()
            count += num

        if duplicates:
            # Ask the user which detected duplicates should be added anyway.
            paths, formats, metadata = [], [], []
            for p, f, mis in duplicates:
                paths.extend(p)
                formats.extend(f)
                metadata.extend(mis)
            dups = [(mic, mic.cover, [p]) for mic, p in zip(metadata, paths)]
            d = DuplicatesQuestion(m.db, dups, parent=gui)
            dups = tuple(d.duplicates)
            if dups:
                paths, formats, metadata = [], [], []
                for mi, cover, book_paths in dups:
                    paths.extend(book_paths)
                    formats.extend([p.rpartition('.')[-1] for p in book_paths])
                    metadata.extend([mi for i in book_paths])
                ids = m.add_books(paths, formats, metadata,
                    add_duplicates=True, return_ids=True)[1]
                added_ids |= set(ids)
                num = len(ids)
                count += num

        for tdir in data.itervalues():
            try:
                shutil.rmtree(tdir)
            except:
                pass

        if added_ids and gprefs['auto_add_auto_convert']:
            self.auto_convert.emit(added_ids)

        if count > 0:
            m.books_added(count)
            gui.status_bar.show_message(_(
                'Added %(num)d book(s) automatically from %(src)s') %
                dict(num=count, src=self.worker.path), 2000)
            gui.refresh_cover_browser()

        if needs_rescan:
            QTimer.singleShot(2000, self.dir_changed)

    def do_auto_convert(self, added_ids):
        """Kick off auto-conversion of the newly added books."""
        gui = self.parent()
        gui.iactions['Convert Books'].auto_convert_auto_add(added_ids)
chrxr/wagtail | wagtail/wagtailadmin/views/account.py | 3 | 5345 | from __future__ import absolute_import, unicode_literals
from functools import wraps
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import views as auth_views
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from django.http import Http404
from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.debug import sensitive_post_parameters
from wagtail.utils.compat import user_is_authenticated
from wagtail.wagtailadmin import forms
from wagtail.wagtailcore.models import UserPagePermissionsProxy
from wagtail.wagtailusers.forms import NotificationPreferencesForm
from wagtail.wagtailusers.models import UserProfile
# Helper functions to check password management settings to enable/disable views as appropriate.
# These are functions rather than class-level constants so that they can be overridden in tests
# by override_settings
def password_management_enabled():
    """Return whether users may change their own password (default: True)."""
    return getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True)
def password_reset_enabled():
    """Return whether password resets are allowed; falls back to the
    password-management setting when not configured explicitly."""
    return getattr(settings, 'WAGTAIL_PASSWORD_RESET_ENABLED', password_management_enabled())
# Views
def account(request):
    """Render the account settings index page, hiding links the current
    user cannot use (password change, notification preferences)."""
    user_perms = UserPagePermissionsProxy(request.user)
    # Notification preferences only make sense for users who can edit or
    # publish pages.
    show_notification_preferences = user_perms.can_edit_pages() or user_perms.can_publish_pages()

    return render(request, 'wagtailadmin/account/account.html', {
        'show_change_password': password_management_enabled() and request.user.has_usable_password(),
        'show_notification_preferences': show_notification_preferences
    })
def change_password(request):
    """Display and process the change-password form.

    404s when password management is disabled; renders the template without
    a form (form=None) for users with no usable password (e.g. external
    auth backends).
    """
    if not password_management_enabled():
        raise Http404

    can_change_password = request.user.has_usable_password()

    if can_change_password:
        if request.method == 'POST':
            form = PasswordChangeForm(request.user, request.POST)

            if form.is_valid():
                form.save()
                # Keep the user logged in after the password change.
                update_session_auth_hash(request, form.user)

                messages.success(request, _("Your password has been changed successfully!"))
                return redirect('wagtailadmin_account')
        else:
            form = PasswordChangeForm(request.user)
    else:
        form = None

    return render(request, 'wagtailadmin/account/change_password.html', {
        'form': form,
        'can_change_password': can_change_password,
    })
def _wrap_password_reset_view(view_func):
@wraps(view_func)
def wrapper(*args, **kwargs):
if not password_reset_enabled():
raise Http404
return view_func(*args, **kwargs)
return wrapper
# Re-export Django's stock password-reset views, each gated on the
# WAGTAIL_PASSWORD_RESET_ENABLED setting via the wrapper above.
password_reset = _wrap_password_reset_view(auth_views.password_reset)
password_reset_done = _wrap_password_reset_view(auth_views.password_reset_done)
password_reset_confirm = _wrap_password_reset_view(auth_views.password_reset_confirm)
password_reset_complete = _wrap_password_reset_view(auth_views.password_reset_complete)
def notification_preferences(request):
    """Display and process the per-user notification preferences form."""
    if request.method == 'POST':
        form = NotificationPreferencesForm(request.POST, instance=UserProfile.get_for_user(request.user))

        if form.is_valid():
            form.save()
            messages.success(request, _("Your preferences have been updated successfully!"))
            return redirect('wagtailadmin_account')
    else:
        form = NotificationPreferencesForm(instance=UserProfile.get_for_user(request.user))

    # quick-and-dirty catch-all in case the form has been rendered with no
    # fields, as the user has no customisable permissions
    if not form.fields:
        return redirect('wagtailadmin_account')

    return render(request, 'wagtailadmin/account/notification_preferences.html', {
        'form': form,
    })
@sensitive_post_parameters()
@never_cache
def login(request):
    """Admin login view; skips the form for users already signed in with
    admin access, otherwise delegates to Django's login view with the
    Wagtail template and form."""
    if user_is_authenticated(request.user) and request.user.has_perm('wagtailadmin.access_admin'):
        return redirect('wagtailadmin_home')
    else:
        # Imported here (not at module level) -- presumably to avoid an
        # import-time dependency cycle; confirm before moving it.
        from django.contrib.auth import get_user_model
        return auth_views.login(
            request,
            template_name='wagtailadmin/login.html',
            authentication_form=forms.LoginForm,
            extra_context={
                'show_password_reset': password_reset_enabled(),
                'username_field': get_user_model().USERNAME_FIELD,
            },
        )
def logout(request):
    """Log the user out, show a confirmation message and force-delete the
    session cookie so front-end caches treat the visitor as anonymous."""
    response = auth_views.logout(request, next_page='wagtailadmin_login')

    messages.success(request, _('You have been successfully logged out.'))
    # By default, logging out will generate a fresh sessionid cookie. We want to use the
    # absence of sessionid as an indication that front-end pages are being viewed by a
    # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.
    response.delete_cookie(settings.SESSION_COOKIE_NAME,
        domain=settings.SESSION_COOKIE_DOMAIN,
        path=settings.SESSION_COOKIE_PATH)

    # HACK: pretend that the session hasn't been modified, so that SessionMiddleware
    # won't override the above and write a new cookie.
    request.session.modified = False

    return response
| bsd-3-clause |
CityofPittsburgh/pittsburgh-purchasing-suite | migrations/versions/31d29fbffe44_add_passwords_for_users.py | 1 | 1988 | """add passwords for users
Revision ID: 31d29fbffe44
Revises: 48c578b852fa
Create Date: 2016-01-20 23:33:36.893832
"""
# revision identifiers, used by Alembic.
revision = '31d29fbffe44'
down_revision = '48c578b852fa'
import random
from flask_security.utils import encrypt_password
from alembic import op
import sqlalchemy as sa
# 62-character alphanumeric pool ([0-9a-z A-Z]) used to build throwaway passwords.
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def rand_alphabet():
    """Return the encrypted form of a random 16-character alphanumeric password.

    The plaintext is never stored, so affected accounts must go through a
    password reset before they can log in.

    Uses ``random.SystemRandom`` (the OS CSPRNG) rather than the module-level
    ``random`` functions: these strings become real account passwords, and the
    default Mersenne-Twister generator is predictable.
    """
    rng = random.SystemRandom()
    return encrypt_password(''.join(rng.choice(ALPHABET) for _ in range(16)))
def upgrade():
    """Add Flask-Security login-tracking/password columns to ``users`` and a
    description column to ``roles``, then mark all existing users confirmed."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(u'roles', sa.Column('description', sa.String(length=255), nullable=True))
    op.add_column(u'users', sa.Column('confirmed_at', sa.DateTime(), nullable=True))
    op.add_column(u'users', sa.Column('current_login_at', sa.DateTime(), nullable=True))
    op.add_column(u'users', sa.Column('current_login_ip', sa.String(length=255), nullable=True))
    op.add_column(u'users', sa.Column('last_login_at', sa.DateTime(), nullable=True))
    op.add_column(u'users', sa.Column('last_login_ip', sa.String(length=255), nullable=True))
    op.add_column(u'users', sa.Column('login_count', sa.Integer(), nullable=True))
    # NOTE(review): rand_alphabet() is evaluated once when this module runs,
    # so every pre-existing row gets the *same* server_default password hash.
    # Acceptable only if these are throwaway passwords that force a reset --
    # confirm that is the intent.
    op.add_column(u'users', sa.Column(
        'password', sa.String(length=255), nullable=False,
        default=rand_alphabet(), server_default=rand_alphabet()
    ))
    ### end Alembic commands ###

    # Mark all existing accounts as confirmed so Flask-Security's
    # confirmation check does not lock them out.
    op.execute(sa.sql.text('''
        UPDATE users SET confirmed_at = now()
    '''))
def downgrade():
    """Drop every column added by :func:`upgrade`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'users', 'password')
    op.drop_column(u'users', 'login_count')
    op.drop_column(u'users', 'last_login_ip')
    op.drop_column(u'users', 'last_login_at')
    op.drop_column(u'users', 'current_login_ip')
    op.drop_column(u'users', 'current_login_at')
    op.drop_column(u'users', 'confirmed_at')
    op.drop_column(u'roles', 'description')
    ### end Alembic commands ###
mbalasso/mynumpy | doc/sphinxext/compiler_unparse.py | 71 | 24704 | """ Turn compiler.ast structures back into executable python code.
The unparse method takes a compiler.ast tree and transforms it back into
valid python code. It is incomplete and currently only works for
import statements, function calls, function definitions, assignments, and
basic expressions.
Inspired by python-2.5-svn/Demo/parser/unparse.py
fixme: We may want to move to using _ast trees because the compiler for
them is about 6 times faster than compiler.compile.
"""
import sys
import cStringIO
from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
def unparse(ast, single_line_functions=False):
    """Return Python source code regenerated from a `compiler.ast` tree.

    :param ast: the parsed `compiler.ast` tree to turn back into source
    :param single_line_functions: if True, function bodies are emitted on the
        same line as the ``def`` header instead of as an indented block
    """
    s = cStringIO.StringIO()
    UnparseCompilerAst(ast, s, single_line_functions)
    return s.getvalue().lstrip()
# Relative binding strength of binary operators, keyed by the fully qualified
# compiler.ast node class name; higher values bind tighter.
op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
class UnparseCompilerAst:
""" Methods in this class recursively traverse an AST and
output source code for the abstract syntax; original formatting
is disregarged.
"""
#########################################################################
# object interface.
#########################################################################
def __init__(self, tree, file = sys.stdout, single_line_functions=False):
""" Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file.
"""
self.f = file
self._single_func = single_line_functions
self._do_indent = True
self._indent = 0
self._dispatch(tree)
self._write("\n")
self.f.flush()
#########################################################################
# Unparser private interface.
#########################################################################
### format, output, and dispatch methods ################################
def _fill(self, text = ""):
"Indent a piece of text, according to the current indentation level"
if self._do_indent:
self._write("\n"+" "*self._indent + text)
else:
self._write(text)
def _write(self, text):
"Append a piece of text to the current line."
self.f.write(text)
def _enter(self):
"Print ':', and increase the indentation."
self._write(": ")
self._indent += 1
def _leave(self):
"Decrease the indentation level."
self._indent -= 1
def _dispatch(self, tree):
"_dispatcher function, _dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self._dispatch(t)
return
meth = getattr(self, "_"+tree.__class__.__name__)
if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
return
meth(tree)
#########################################################################
# compiler.ast unparsing methods.
#
# There should be one method per concrete grammar type. They are
# organized in alphabetical order.
#########################################################################
def _Add(self, t):
self.__binary_op(t, '+')
def _And(self, t):
self._write(" (")
for i, node in enumerate(t.nodes):
self._dispatch(node)
if i != len(t.nodes)-1:
self._write(") and (")
self._write(")")
def _AssAttr(self, t):
""" Handle assigning an attribute of an object
"""
self._dispatch(t.expr)
self._write('.'+t.attrname)
def _Assign(self, t):
""" Expression Assignment such as "a = 1".
This only handles assignment in expressions. Keyword assignment
is handled separately.
"""
self._fill()
for target in t.nodes:
self._dispatch(target)
self._write(" = ")
self._dispatch(t.expr)
if not self._do_indent:
self._write('; ')
def _AssName(self, t):
""" Name on left hand side of expression.
Treat just like a name on the right side of an expression.
"""
self._Name(t)
def _AssTuple(self, t):
""" Tuple on left hand side of an expression.
"""
# _write each elements, separated by a comma.
for element in t.nodes[:-1]:
self._dispatch(element)
self._write(", ")
# Handle the last one without writing comma
last_element = t.nodes[-1]
self._dispatch(last_element)
def _AugAssign(self, t):
""" +=,-=,*=,/=,**=, etc. operations
"""
self._fill()
self._dispatch(t.node)
self._write(' '+t.op+' ')
self._dispatch(t.expr)
if not self._do_indent:
self._write(';')
def _Bitand(self, t):
""" Bit and operation.
"""
for i, node in enumerate(t.nodes):
self._write("(")
self._dispatch(node)
self._write(")")
if i != len(t.nodes)-1:
self._write(" & ")
def _Bitor(self, t):
""" Bit or operation
"""
for i, node in enumerate(t.nodes):
self._write("(")
self._dispatch(node)
self._write(")")
if i != len(t.nodes)-1:
self._write(" | ")
def _CallFunc(self, t):
""" Function call.
"""
self._dispatch(t.node)
self._write("(")
comma = False
for e in t.args:
if comma: self._write(", ")
else: comma = True
self._dispatch(e)
if t.star_args:
if comma: self._write(", ")
else: comma = True
self._write("*")
self._dispatch(t.star_args)
if t.dstar_args:
if comma: self._write(", ")
else: comma = True
self._write("**")
self._dispatch(t.dstar_args)
self._write(")")
def _Compare(self, t):
self._dispatch(t.expr)
for op, expr in t.ops:
self._write(" " + op + " ")
self._dispatch(expr)
def _Const(self, t):
""" A constant value such as an integer value, 3, or a string, "hello".
"""
self._dispatch(t.value)
def _Decorators(self, t):
""" Handle function decorators (eg. @has_units)
"""
for node in t.nodes:
self._dispatch(node)
def _Dict(self, t):
self._write("{")
for i, (k, v) in enumerate(t.items):
self._dispatch(k)
self._write(": ")
self._dispatch(v)
if i < len(t.items)-1:
self._write(", ")
self._write("}")
def _Discard(self, t):
""" Node for when return value is ignored such as in "foo(a)".
"""
self._fill()
self._dispatch(t.expr)
def _Div(self, t):
self.__binary_op(t, '/')
def _Ellipsis(self, t):
self._write("...")
def _From(self, t):
""" Handle "from xyz import foo, bar as baz".
"""
# fixme: Are From and ImportFrom handled differently?
self._fill("from ")
self._write(t.modname)
self._write(" import ")
for i, (name,asname) in enumerate(t.names):
if i != 0:
self._write(", ")
self._write(name)
if asname is not None:
self._write(" as "+asname)
def _Function(self, t):
        """ Handle function definitions
        """
        # Decorators (if any) are emitted before the def line.
        if t.decorators is not None:
            self._fill("@")
            self._dispatch(t.decorators)
        self._fill("def "+t.name + "(")
        # Left-pad the defaults with None so they align 1:1 with argnames;
        # only trailing parameters can carry default values.
        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
        for i, arg in enumerate(zip(t.argnames, defaults)):
            self._write(arg[0])
            if arg[1] is not None:
                self._write('=')
                self._dispatch(arg[1])
            if i < len(t.argnames)-1:
                self._write(', ')
        self._write(")")
        # In single-function mode the body is emitted without indentation;
        # the flag is restored unconditionally afterwards.
        if self._single_func:
            self._do_indent = False
        self._enter()
        self._dispatch(t.code)
        self._leave()
        self._do_indent = True
def _Getattr(self, t):
    """ Emit attribute access (``obj.attr``), parenthesizing arithmetic
        receivers so precedence is preserved, e.g. ``(a + b).attr``.
    """
    needs_parens = isinstance(t.expr, (Div, Mul, Sub, Add))
    if needs_parens:
        self._write('(')
    self._dispatch(t.expr)
    if needs_parens:
        self._write(')')
    self._write('.' + t.attrname)
def _If(self, t):
        """ Emit an if/elif/else statement. """
        self._fill()
        # Each (condition, body) pair in t.tests becomes an "if"/"elif" arm;
        # _enter/_leave presumably handle the trailing ':' and indentation.
        for i, (compare,code) in enumerate(t.tests):
            if i == 0:
                self._write("if ")
            else:
                self._write("elif ")
            self._dispatch(compare)
            self._enter()
            self._fill()
            self._dispatch(code)
            self._leave()
            self._write("\n")
        if t.else_ is not None:
            self._write("else")
            self._enter()
            self._fill()
            self._dispatch(t.else_)
            self._leave()
            self._write("\n")
def _IfExp(self, t):
        """ Emit a conditional expression: ``<then> if <test> else (<else>)``. """
        self._dispatch(t.then)
        self._write(" if ")
        self._dispatch(t.test)
        if t.else_ is not None:
            # Parenthesize the else-branch to keep its precedence safe.
            self._write(" else (")
            self._dispatch(t.else_)
            self._write(")")
def _Import(self, t):
    """ Emit ``import name [as alias], ...``. """
    self._fill("import ")
    first = True
    for name, alias in t.names:
        if not first:
            self._write(", ")
        first = False
        self._write(name)
        if alias is not None:
            self._write(" as " + alias)
def _Keyword(self, t):
        """ Keyword value assignment within function calls and definitions.
        """
        # Emitted as name=value, with no spaces around '=' (PEP 8 style
        # for keyword arguments).
        self._write(t.name)
        self._write("=")
        self._dispatch(t.expr)
def _List(self, t):
    """ Emit a list literal: ``[a, b, ...]``. """
    self._write("[")
    for position, element in enumerate(t.nodes):
        # Comma before every element except the first.
        if position:
            self._write(", ")
        self._dispatch(element)
    self._write("]")
def _Module(self, t):
        """ Emit a whole module: its docstring first (if any), then the body. """
        if t.doc is not None:
            self._dispatch(t.doc)
        self._dispatch(t.node)
def _Mul(self, t):
        """ Emit a multiplication expression via the shared binary-op writer. """
        self.__binary_op(t, '*')
def _Name(self, t):
        """ Emit a bare identifier. """
        self._write(t.name)
def _NoneType(self, t):
        """ Emit the ``None`` literal. """
        self._write("None")
def _Not(self, t):
        """ Emit ``not (<expr>)``; parens keep the operand's precedence safe. """
        self._write('not (')
        self._dispatch(t.expr)
        self._write(')')
def _Or(self, t):
    """ Emit an ``or`` expression with every operand parenthesized:
        `` (a) or (b)`` (note the original leading space is preserved).
    """
    self._write(" (")
    for position, operand in enumerate(t.nodes):
        # The ") or (" separator goes before every operand but the first,
        # producing the same text as writing it after every non-last one.
        if position:
            self._write(") or (")
        self._dispatch(operand)
    self._write(")")
def _Pass(self, t):
        """ Emit a ``pass`` statement, with its own trailing newline. """
        self._write("pass\n")
def _Printnl(self, t):
        """ Emit a Python 2 ``print`` statement, including the
            ``print >> dest, ...`` redirect form.
        """
        self._fill("print ")
        if t.dest:
            # Extended form: the destination stream comes first.
            self._write(">> ")
            self._dispatch(t.dest)
            self._write(", ")
        comma = False
        for node in t.nodes:
            if comma: self._write(', ')
            else: comma = True
            self._dispatch(node)
def _Power(self, t):
        """ Emit an exponentiation expression via the shared binary-op writer. """
        self.__binary_op(t, '**')
def _Return(self, t):
    """ Emit a ``return`` statement.

    A returned tuple is written as a bare comma-separated list
    (``return a, b``) rather than a parenthesized tuple display.
    """
    self._fill("return ")
    if t.value:
        if isinstance(t.value, Tuple):
            # Dispatch each element instead of assuming every element is a
            # Name node; the original ``name.name`` join raised
            # AttributeError for e.g. constants or attribute accesses.
            for i, element in enumerate(t.value.nodes):
                if i:
                    self._write(', ')
                self._dispatch(element)
        else:
            self._dispatch(t.value)
    if not self._do_indent:
        # Statement separator when emitting on a single line.
        self._write('; ')
def _Slice(self, t):
        """ Emit a simple slice: ``expr[lower:upper]`` (step not supported). """
        self._dispatch(t.expr)
        self._write("[")
        if t.lower:
            self._dispatch(t.lower)
        self._write(":")
        if t.upper:
            self._dispatch(t.upper)
        #if t.step:
        #    self._write(":")
        #    self._dispatch(t.step)
        self._write("]")
def _Sliceobj(self, t):
        """ Emit an extended-slice component such as ``a:b:c``. """
        for i, node in enumerate(t.nodes):
            if i != 0:
                self._write(":")
            # A Const(None) slot means that bound was omitted, so emit nothing
            # between the colons.
            if not (isinstance(node, Const) and node.value is None):
                self._dispatch(node)
def _Stmt(self, tree):
        """ Emit every statement in a statement-list node, in order. """
        for node in tree.nodes:
            self._dispatch(node)
def _Sub(self, t):
        """ Emit a subtraction expression via the shared binary-op writer. """
        self.__binary_op(t, '-')
def _Subscript(self, t):
    """ Emit a subscript expression such as ``obj[i]`` or ``obj[i,j]``. """
    self._dispatch(t.expr)
    self._write("[")
    first = True
    for index in t.subs:
        if first:
            first = False
        else:
            # Note: no space after the comma, matching the original output.
            self._write(",")
        self._dispatch(index)
    self._write("]")
def _TryExcept(self, t):
        """ Emit a try/except[/else] statement. """
        self._fill("try")
        self._enter()
        self._dispatch(t.body)
        self._leave()
        # Each handler is a (exception-type, target, body) triple.
        # NOTE(review): for a bare "except:" handler[0] is None, and
        # dispatching it would emit the text "None" — confirm intended.
        for handler in t.handlers:
            self._fill('except ')
            self._dispatch(handler[0])
            if handler[1] is not None:
                # Python 2 spelling: "except Type, name".
                self._write(', ')
                self._dispatch(handler[1])
            self._enter()
            self._dispatch(handler[2])
            self._leave()
        if t.else_:
            self._fill("else")
            self._enter()
            self._dispatch(t.else_)
            self._leave()
def _Tuple(self, t):
    """ Emit a tuple literal.

    A one-element tuple must carry a trailing comma (``(x,)``); without it
    the emitted text would parse back as a plain parenthesized expression,
    which the original implementation got wrong.
    """
    if not t.nodes:
        # Empty tuple.
        self._write("()")
        return
    self._write("(")
    for position, element in enumerate(t.nodes):
        if position:
            self._write(", ")
        self._dispatch(element)
    if len(t.nodes) == 1:
        # Disambiguate the singleton tuple from a parenthesized expression.
        self._write(",")
    self._write(")")
def _UnaryAdd(self, t):
        """ Emit a unary plus expression: ``+<expr>``. """
        self._write("+")
        self._dispatch(t.expr)
def _UnarySub(self, t):
        """ Emit a unary minus expression: ``-<expr>``. """
        self._write("-")
        self._dispatch(t.expr)
def _With(self, t):
        """ Emit a ``with`` statement, including an optional ``as`` target. """
        self._fill('with ')
        self._dispatch(t.expr)
        if t.vars:
            self._write(' as ')
            # NOTE(review): t.vars.name is dispatched as a raw string; if
            # _dispatch routes strings through _str, repr() would add quotes
            # around the target name — confirm this path is exercised.
            self._dispatch(t.vars.name)
        self._enter()
        self._dispatch(t.body)
        self._leave()
        self._write('\n')
def _int(self, t):
        """ Emit an integer literal using its repr. """
        self._write(repr(t))
def __binary_op(self, t, symbol):
    """ Emit a binary expression ``left <symbol> right``, parenthesizing an
        operand whose operator binds more loosely than this node's operator.
    """
    def needs_paren(operand):
        # op_precedence (module-level) is keyed by the stringified AST
        # class; `in dict` replaces the original `.keys()` anti-idiom.
        operand_class = str(operand.__class__)
        return (operand_class in op_precedence and
                op_precedence[operand_class] < op_precedence[str(t.__class__)])
    # The left/right handling was duplicated in the original; a single loop
    # writes the operator symbol only after the left operand.
    for operand, trailer in ((t.left, symbol), (t.right, None)):
        wrap = needs_paren(operand)
        if wrap:
            self._write('(')
        self._dispatch(operand)
        if wrap:
            self._write(')')
        if trailer is not None:
            self._write(trailer)
def _float(self, t):
        """ Emit a float literal using str(), which gives the shorter form. """
        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
        # We prefer str here.
        self._write(str(t))
def _str(self, t):
        """ Emit a string literal via repr (adds quotes and escapes). """
        self._write(repr(t))
def _tuple(self, t):
        """ Emit a plain Python tuple value using its str() form. """
        self._write(str(t))
#########################################################################
# These are the methods from the _ast modules unparse.
#
# As our needs to handle more advanced code increase, we may want to
# modify some of the methods below so that they work for compiler.ast.
#########################################################################
# # stmt
# def _Expr(self, tree):
# self._fill()
# self._dispatch(tree.value)
#
# def _Import(self, t):
# self._fill("import ")
# first = True
# for a in t.names:
# if first:
# first = False
# else:
# self._write(", ")
# self._write(a.name)
# if a.asname:
# self._write(" as "+a.asname)
#
## def _ImportFrom(self, t):
## self._fill("from ")
## self._write(t.module)
## self._write(" import ")
## for i, a in enumerate(t.names):
## if i == 0:
## self._write(", ")
## self._write(a.name)
## if a.asname:
## self._write(" as "+a.asname)
## # XXX(jpe) what is level for?
##
#
# def _Break(self, t):
# self._fill("break")
#
# def _Continue(self, t):
# self._fill("continue")
#
# def _Delete(self, t):
# self._fill("del ")
# self._dispatch(t.targets)
#
# def _Assert(self, t):
# self._fill("assert ")
# self._dispatch(t.test)
# if t.msg:
# self._write(", ")
# self._dispatch(t.msg)
#
# def _Exec(self, t):
# self._fill("exec ")
# self._dispatch(t.body)
# if t.globals:
# self._write(" in ")
# self._dispatch(t.globals)
# if t.locals:
# self._write(", ")
# self._dispatch(t.locals)
#
# def _Print(self, t):
# self._fill("print ")
# do_comma = False
# if t.dest:
# self._write(">>")
# self._dispatch(t.dest)
# do_comma = True
# for e in t.values:
# if do_comma:self._write(", ")
# else:do_comma=True
# self._dispatch(e)
# if not t.nl:
# self._write(",")
#
# def _Global(self, t):
# self._fill("global")
# for i, n in enumerate(t.names):
# if i != 0:
# self._write(",")
# self._write(" " + n)
#
# def _Yield(self, t):
# self._fill("yield")
# if t.value:
# self._write(" (")
# self._dispatch(t.value)
# self._write(")")
#
# def _Raise(self, t):
# self._fill('raise ')
# if t.type:
# self._dispatch(t.type)
# if t.inst:
# self._write(", ")
# self._dispatch(t.inst)
# if t.tback:
# self._write(", ")
# self._dispatch(t.tback)
#
#
# def _TryFinally(self, t):
# self._fill("try")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# self._fill("finally")
# self._enter()
# self._dispatch(t.finalbody)
# self._leave()
#
# def _excepthandler(self, t):
# self._fill("except ")
# if t.type:
# self._dispatch(t.type)
# if t.name:
# self._write(", ")
# self._dispatch(t.name)
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _ClassDef(self, t):
# self._write("\n")
# self._fill("class "+t.name)
# if t.bases:
# self._write("(")
# for a in t.bases:
# self._dispatch(a)
# self._write(", ")
# self._write(")")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _FunctionDef(self, t):
# self._write("\n")
# for deco in t.decorators:
# self._fill("@")
# self._dispatch(deco)
# self._fill("def "+t.name + "(")
# self._dispatch(t.args)
# self._write(")")
# self._enter()
# self._dispatch(t.body)
# self._leave()
#
# def _For(self, t):
# self._fill("for ")
# self._dispatch(t.target)
# self._write(" in ")
# self._dispatch(t.iter)
# self._enter()
# self._dispatch(t.body)
# self._leave()
# if t.orelse:
# self._fill("else")
# self._enter()
# self._dispatch(t.orelse)
# self._leave
#
# def _While(self, t):
# self._fill("while ")
# self._dispatch(t.test)
# self._enter()
# self._dispatch(t.body)
# self._leave()
# if t.orelse:
# self._fill("else")
# self._enter()
# self._dispatch(t.orelse)
# self._leave
#
# # expr
# def _Str(self, tree):
# self._write(repr(tree.s))
##
# def _Repr(self, t):
# self._write("`")
# self._dispatch(t.value)
# self._write("`")
#
# def _Num(self, t):
# self._write(repr(t.n))
#
# def _ListComp(self, t):
# self._write("[")
# self._dispatch(t.elt)
# for gen in t.generators:
# self._dispatch(gen)
# self._write("]")
#
# def _GeneratorExp(self, t):
# self._write("(")
# self._dispatch(t.elt)
# for gen in t.generators:
# self._dispatch(gen)
# self._write(")")
#
# def _comprehension(self, t):
# self._write(" for ")
# self._dispatch(t.target)
# self._write(" in ")
# self._dispatch(t.iter)
# for if_clause in t.ifs:
# self._write(" if ")
# self._dispatch(if_clause)
#
# def _IfExp(self, t):
# self._dispatch(t.body)
# self._write(" if ")
# self._dispatch(t.test)
# if t.orelse:
# self._write(" else ")
# self._dispatch(t.orelse)
#
# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
# def _UnaryOp(self, t):
# self._write(self.unop[t.op.__class__.__name__])
# self._write("(")
# self._dispatch(t.operand)
# self._write(")")
#
# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
# "FloorDiv":"//", "Pow": "**"}
# def _BinOp(self, t):
# self._write("(")
# self._dispatch(t.left)
# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
# self._dispatch(t.right)
# self._write(")")
#
# boolops = {_ast.And: 'and', _ast.Or: 'or'}
# def _BoolOp(self, t):
# self._write("(")
# self._dispatch(t.values[0])
# for v in t.values[1:]:
# self._write(" %s " % self.boolops[t.op.__class__])
# self._dispatch(v)
# self._write(")")
#
# def _Attribute(self,t):
# self._dispatch(t.value)
# self._write(".")
# self._write(t.attr)
#
## def _Call(self, t):
## self._dispatch(t.func)
## self._write("(")
## comma = False
## for e in t.args:
## if comma: self._write(", ")
## else: comma = True
## self._dispatch(e)
## for e in t.keywords:
## if comma: self._write(", ")
## else: comma = True
## self._dispatch(e)
## if t.starargs:
## if comma: self._write(", ")
## else: comma = True
## self._write("*")
## self._dispatch(t.starargs)
## if t.kwargs:
## if comma: self._write(", ")
## else: comma = True
## self._write("**")
## self._dispatch(t.kwargs)
## self._write(")")
#
# # slice
# def _Index(self, t):
# self._dispatch(t.value)
#
# def _ExtSlice(self, t):
# for i, d in enumerate(t.dims):
# if i != 0:
# self._write(': ')
# self._dispatch(d)
#
# # others
# def _arguments(self, t):
# first = True
# nonDef = len(t.args)-len(t.defaults)
# for a in t.args[0:nonDef]:
# if first:first = False
# else: self._write(", ")
# self._dispatch(a)
# for a,d in zip(t.args[nonDef:], t.defaults):
# if first:first = False
# else: self._write(", ")
# self._dispatch(a),
# self._write("=")
# self._dispatch(d)
# if t.vararg:
# if first:first = False
# else: self._write(", ")
# self._write("*"+t.vararg)
# if t.kwarg:
# if first:first = False
# else: self._write(", ")
# self._write("**"+t.kwarg)
#
## def _keyword(self, t):
## self._write(t.arg)
## self._write("=")
## self._dispatch(t.value)
#
# def _Lambda(self, t):
# self._write("lambda ")
# self._dispatch(t.args)
# self._write(": ")
# self._dispatch(t.body)
| bsd-3-clause |
zarboz/nvidia_shield | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window that renders scheduler traces as horizontally
    scrollable rows of rectangles, one row per slot reported by the tracer.

    NOTE(review): written for Python 2 and classic wxPython — it relies on
    integer division semantics and APIs such as GetPositionTuple.
    """
    # Vertical layout constants, in pixels.
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5
    def __init__(self, sched_tracer, title, parent = None, id = -1):
        """Builds the frame, wires the tracer, and lays out the panels."""
        wx.Frame.__init__(self, parent, id, title)
        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()
        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        # Paint/key/mouse handlers are bound on both the panel and the
        # scrolled window so events are caught wherever focus lands.
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Fit()
        self.Fit()
        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
        self.txt = None
        self.Show(True)
    def us_to_px(self, val):
        """Map a time delta to pixels: 1000 time units per pixel at zoom 1.0."""
        return val / (10 ** 3) * self.zoom
    def px_to_us(self, val):
        """Inverse of us_to_px: map a pixel count back to a time delta."""
        return (val / self.zoom) * (10 ** 3)
    def scroll_start(self):
        """Returns the (x, y) scroll origin in pixels."""
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)
    def scroll_start_us(self):
        """Returns the scroll origin's x position as a time offset."""
        (x, y) = self.scroll_start()
        return self.px_to_us(x)
    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draws rectangle row *nr* spanning [start, end]; when top_color is
        given, a thin event-marking strip is painted along the row's top."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)
        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT
        dc = self.dc
        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main rectangle so it sits below the marking strip.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH
        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
    def update_rectangles(self, dc, start, end):
        """Asks the tracer to (re)draw everything in [start, end]."""
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)
    def on_paint(self, event):
        """Paint handler: redraws only the currently visible time window."""
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc
        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)
    def rect_from_ypixel(self, y):
        """Maps a y pixel to a rectangle-row index, or -1 when the pixel
        falls outside every row (integer division picks the row)."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1
        return rect
    def update_summary(self, txt):
        """Replaces the summary text widget below the drawing area."""
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
    def on_mouse_down(self, event):
        """Click handler: forwards (row, timestamp) to the tracer."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return
        t = self.px_to_us(x) + self.ts_start
        self.sched_tracer.mouse_down(rect, t)
    def update_width_virtual(self):
        """Recomputes the virtual width from the trace span and zoom."""
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
    def __zoom(self, x):
        """Applies the current zoom while keeping time *x* at the view start."""
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()
    def zoom_in(self):
        self.zoom *= 2
        x = self.scroll_start_us()
        self.__zoom(x)
    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)
    def on_key_press(self, event):
        """Key handler: '+'/'-' zoom, arrow keys scroll one step."""
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return
        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
prasanna08/oppia | core/domain/improvements_domain.py | 4 | 7872 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects related to Oppia improvement tasks."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import user_services
from core.platform import models
import python_utils
import utils
(improvements_models,) = (
models.Registry.import_models([models.NAMES.improvements]))
class TaskEntry(python_utils.OBJECT):
    """An actionable improvements-tab task tracked against some entity.

    A task points at a sub-target of a versioned entity (for example a
    state of a particular exploration version), carries an auto-generated
    description of the issue, and records resolution metadata once the
    task has been resolved.

    Attributes:
        entity_type: str. Type of the entity the task refers to, e.g.
            "exploration".
        entity_id: str. ID of that entity, e.g. an exploration ID.
        entity_version: int. Version of that entity.
        task_type: str. The kind of task being tracked.
        target_type: str. Type of the targeted sub-entity, e.g. "state".
        target_id: str. ID of the sub-entity, e.g. a state name.
        issue_description: str or None. Oppia-generated sentence explaining
            why the task was created.
        status: str. Progress state of the task.
        resolver_id: str or None. User who resolved the task; populated
            only when status is resolved.
        resolved_on: datetime or None. When the task was resolved;
            populated only when status is resolved.
    """

    def __init__(
            self, entity_type, entity_id, entity_version, task_type,
            target_type, target_id, issue_description, status, resolver_id,
            resolved_on):
        """Builds a new TaskEntry from raw field values.

        Args:
            entity_type: str. Type of the entity the task refers to.
            entity_id: str. ID of that entity.
            entity_version: int. Version of that entity.
            task_type: str. The kind of task being tracked.
            target_type: str. Type of the targeted sub-entity.
            target_id: str. ID of the sub-entity.
            issue_description: str. Oppia-generated sentence explaining why
                the task was created.
            status: str. Progress state of the task.
            resolver_id: str. User who resolved the task; only honored when
                status is resolved, otherwise discarded.
            resolved_on: datetime. When the task was resolved; only honored
                when status is resolved, otherwise discarded.
        """
        self.entity_type = entity_type
        self.entity_id = entity_id
        self.entity_version = entity_version
        self.task_type = task_type
        self.target_type = target_type
        self.target_id = target_id
        self.issue_description = issue_description
        self.status = status
        # Resolution metadata only makes sense on a resolved task; scrub it
        # for every other status so stale values cannot leak through.
        if status == improvements_models.TASK_STATUS_RESOLVED:
            self.resolver_id = resolver_id
            self.resolved_on = resolved_on
        else:
            self.resolver_id = None
            self.resolved_on = None

    @property
    def task_id(self):
        """The unique identifier of this task.

        Returns:
            str. Has the form: "[entity_type].[entity_id].[entity_version].
            [task_type].[target_type].[target_id]".
        """
        model_cls = improvements_models.TaskEntryModel
        return model_cls.generate_task_id(
            self.entity_type, self.entity_id, self.entity_version,
            self.task_type, self.target_type, self.target_id)

    @property
    def composite_entity_id(self):
        """Pre-joined entity key that speeds up queries which would
        otherwise filter on the three component fields individually.

        Returns:
            str. Has the form: "[entity_type].[entity_id].[entity_version]".
        """
        model_cls = improvements_models.TaskEntryModel
        return model_cls.generate_composite_entity_id(
            self.entity_type, self.entity_id, self.entity_version)

    def to_dict(self):
        """Returns a dict-representation of the task.

        Returns:
            dict. Carries every constructor field verbatim plus derived
            resolution info: 'resolver_username' and
            'resolver_profile_picture_data_url' (looked up from the
            resolver's user settings when the task is resolved, else None)
            and 'resolved_on_msecs' (epoch milliseconds of the resolution
            time when resolved, else None).
        """
        resolver_username = None
        resolver_picture_url = None
        if self.resolver_id:
            settings = user_services.get_user_settings(
                self.resolver_id, strict=True)
            resolver_username = settings.username
            resolver_picture_url = settings.profile_picture_data_url
        resolved_on_msecs = None
        if self.resolved_on:
            resolved_on_msecs = utils.get_time_in_millisecs(self.resolved_on)
        return {
            'entity_type': self.entity_type,
            'entity_id': self.entity_id,
            'entity_version': self.entity_version,
            'task_type': self.task_type,
            'target_type': self.target_type,
            'target_id': self.target_id,
            'issue_description': self.issue_description,
            'status': self.status,
            'resolver_username': resolver_username,
            'resolver_profile_picture_data_url': resolver_picture_url,
            'resolved_on_msecs': resolved_on_msecs,
        }
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.