repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
suncycheng/intellij-community | python/helpers/py3only/docutils/utils/math/latex2mathml.py | 46 | 17099 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# :Id: $Id: latex2mathml.py 7668 2013-06-04 12:46:30Z milde $
# :Copyright: © 2010 Günter Milde.
# Based on rst2mathml.py from the latex_math sandbox project
# © 2005 Jens Jørgen Mortensen
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""Convert LaTex math code into presentational MathML"""
# Based on the `latex_math` sandbox project by Jens Jørgen Mortensen
import docutils.utils.math.tex2unichar as tex2unichar
# TeX spacing combining
# \accent commands: maps the LaTeX accent name to the (spacing) Unicode
# glyph placed in an <mover> element.  The codepoints in the trailing
# comments are the corresponding *combining* characters.
over = {'acute': '\u00B4',     # u'\u0301'
        'bar': '\u00AF',       # u'\u0304'
        'breve': '\u02D8',     # u'\u0306'
        'check': '\u02C7',     # u'\u030C'
        'dot': '\u02D9',       # u'\u0307'
        'ddot': '\u00A8',      # u'\u0308'
        'dddot': '\u20DB',
        'grave': '`',          # u'\u0300'
        'hat': '^',            # u'\u0302'
        'mathring': '\u02DA',  # u'\u030A'
        'overleftrightarrow': '\u20e1',
        # 'overline':          # u'\u0305'
        'tilde': '\u02DC',     # u'\u0303'
        'vec': '\u20D7'}
# Capital Greek letters (rendered upright in TeX style); the parser emits
# these as <mo> elements.
Greek = { # Capital Greek letters: (upright in TeX style)
    'Phi': '\u03a6', 'Xi': '\u039e', 'Sigma': '\u03a3',
    'Psi': '\u03a8', 'Delta': '\u0394', 'Theta': '\u0398',
    'Upsilon': '\u03d2', 'Pi': '\u03a0', 'Omega': '\u03a9',
    'Gamma': '\u0393', 'Lambda': '\u039b'}
# Identifier characters (\alpha, \beth, ...): rendered as <mi>.
letters = tex2unichar.mathalpha

# Symbol commands rendered as <mo>.  NOTE: build from a *copy* -- the
# previous code bound ``special = tex2unichar.mathbin`` directly, so the
# update() calls below mutated the dictionary inside the tex2unichar
# module, corrupting it for every other importer of that module.
special = dict(tex2unichar.mathbin)   # Binary symbols
special.update(tex2unichar.mathrel)   # Relation symbols, arrow symbols
special.update(tex2unichar.mathord)   # Miscellaneous symbols
special.update(tex2unichar.mathop)    # Variable-sized symbols
special.update(tex2unichar.mathopen)  # Braces
special.update(tex2unichar.mathclose) # Braces
special.update(tex2unichar.mathfence)

# The characters of the "big operator" symbols; used by the parser to
# decide whether ^/_ should attach limits (mover/munder) instead of
# ordinary sub-/superscripts.
sumintprod = ''.join([special[symbol] for symbol in
                      ['sum', 'int', 'oint', 'prod']])
# LaTeX function-name commands (\sin, \log, ...); rendered as upright
# <mo> tokens carrying the bare name.
functions = ['arccos', 'arcsin', 'arctan', 'arg', 'cos', 'cosh',
             'cot', 'coth', 'csc', 'deg', 'det', 'dim',
             'exp', 'gcd', 'hom', 'inf', 'ker', 'lg',
             'lim', 'liminf', 'limsup', 'ln', 'log', 'max',
             'min', 'Pr', 'sec', 'sin', 'sinh', 'sup',
             'tan', 'tanh',
             'injlim', 'varinjlim', 'varlimsup',
             'projlim', 'varliminf', 'varprojlim']
# \mathbb{...}: double-struck (blackboard bold) capitals.  Most live in
# the Mathematical Alphanumeric Symbols plane; C, H, N, P, Q, R and Z
# use the older dedicated Letterlike Symbols codepoints in the BMP.
mathbb = {
    'A': '\U0001D538',
    'B': '\U0001D539',
    'C': '\u2102',
    'D': '\U0001D53B',
    'E': '\U0001D53C',
    'F': '\U0001D53D',
    'G': '\U0001D53E',
    'H': '\u210D',
    'I': '\U0001D540',
    'J': '\U0001D541',
    'K': '\U0001D542',
    'L': '\U0001D543',
    'M': '\U0001D544',
    'N': '\u2115',
    'O': '\U0001D546',
    'P': '\u2119',
    'Q': '\u211A',
    'R': '\u211D',
    'S': '\U0001D54A',
    'T': '\U0001D54B',
    'U': '\U0001D54C',
    'V': '\U0001D54D',
    'W': '\U0001D54E',
    'X': '\U0001D54F',
    'Y': '\U0001D550',
    'Z': '\u2124',
    }
# \mathscr / \mathcal: script letters.  As with mathbb, a handful of
# letters have legacy Letterlike Symbols codepoints instead of entries
# in the Mathematical Alphanumeric Symbols plane.
mathscr = {
    'A': '\U0001D49C',
    'B': '\u212C',  # bernoulli function
    'C': '\U0001D49E',
    'D': '\U0001D49F',
    'E': '\u2130',
    'F': '\u2131',
    'G': '\U0001D4A2',
    'H': '\u210B',  # hamiltonian
    'I': '\u2110',
    'J': '\U0001D4A5',
    'K': '\U0001D4A6',
    'L': '\u2112',  # lagrangian
    'M': '\u2133',  # physics m-matrix
    'N': '\U0001D4A9',
    'O': '\U0001D4AA',
    'P': '\U0001D4AB',
    'Q': '\U0001D4AC',
    'R': '\u211B',
    'S': '\U0001D4AE',
    'T': '\U0001D4AF',
    'U': '\U0001D4B0',
    'V': '\U0001D4B1',
    'W': '\U0001D4B2',
    'X': '\U0001D4B3',
    'Y': '\U0001D4B4',
    'Z': '\U0001D4B5',
    'a': '\U0001D4B6',
    'b': '\U0001D4B7',
    'c': '\U0001D4B8',
    'd': '\U0001D4B9',
    'e': '\u212F',
    'f': '\U0001D4BB',
    'g': '\u210A',
    'h': '\U0001D4BD',
    'i': '\U0001D4BE',
    'j': '\U0001D4BF',
    'k': '\U0001D4C0',
    'l': '\U0001D4C1',
    'm': '\U0001D4C2',
    'n': '\U0001D4C3',
    'o': '\u2134',  # order of
    'p': '\U0001D4C5',
    'q': '\U0001D4C6',
    'r': '\U0001D4C7',
    's': '\U0001D4C8',
    't': '\U0001D4C9',
    'u': '\U0001D4CA',
    'v': '\U0001D4CB',
    'w': '\U0001D4CC',
    'x': '\U0001D4CD',
    'y': '\U0001D4CE',
    'z': '\U0001D4CF',
    }
# \not-able relations: maps the relation token to its negated Unicode
# character.  Raw strings are required for the keys: '\in' / '\equiv'
# are invalid escape sequences in ordinary string literals (they only
# work by accident and emit DeprecationWarning on modern Python).
negatables = {'=': '\u2260',
              r'\in': '\u2209',
              r'\equiv': '\u2262'}
# LaTeX to MathML translation stuff:
class math:
    """Base class for MathML elements.

    The tree is built incrementally by the parser: ``append`` adds a
    child and returns the deepest ancestor that still accepts children,
    which becomes the parser's new "current node".
    """

    nchildren = 1000000
    """Required number of children"""

    def __init__(self, children=None, inline=None):
        """math([children]) -> MathML element

        children can be one child or a list of children."""
        self.children = []
        if children is not None:
            if type(children) is list:
                for child in children:
                    self.append(child)
            else:
                # Only one child:
                self.append(children)
        if inline is not None:
            # Only the root <math> element gets an ``inline`` attribute;
            # xml_start() keys off its presence.
            self.inline = inline

    def __repr__(self):
        if hasattr(self, 'children'):
            return self.__class__.__name__ + '(%s)' % \
                   ','.join([repr(child) for child in self.children])
        else:
            # Token subclasses (mx) never set ``children``.
            return self.__class__.__name__

    def full(self):
        """Room for more children?"""
        return len(self.children) >= self.nchildren

    def append(self, child):
        """append(child) -> element

        Appends child and returns self if self is not full or first
        non-full parent."""
        assert not self.full()
        self.children.append(child)
        child.parent = self
        node = self
        while node.full():
            # NOTE(review): assumes some ancestor always has room; the
            # root's huge nchildren guarantees termination in practice.
            node = node.parent
        return node

    def delete_child(self):
        """delete_child() -> child

        Delete last child and return it."""
        child = self.children[-1]
        del self.children[-1]
        return child

    def close(self):
        """close() -> parent

        Close element and return first non-full element."""
        # NOTE(review): raises AttributeError when called on the root
        # (no ``parent``), e.g. for unbalanced '}' input -- confirm that
        # surfacing AttributeError instead of SyntaxError is acceptable.
        parent = self.parent
        while parent.full():
            parent = parent.parent
        return parent

    def xml(self):
        """xml() -> list of XML-fragment strings"""
        return self.xml_start() + self.xml_body() + self.xml_end()

    def xml_start(self):
        if not hasattr(self, 'inline'):
            # Ordinary element: tag named after the Python class.
            return ['<%s>' % self.__class__.__name__]
        # Root element: carries the MathML namespace and display mode.
        xmlns = 'http://www.w3.org/1998/Math/MathML'
        if self.inline:
            return ['<math xmlns="%s">' % xmlns]
        else:
            return ['<math xmlns="%s" mode="display">' % xmlns]

    def xml_end(self):
        return ['</%s>' % self.__class__.__name__]

    def xml_body(self):
        xml = []
        for child in self.children:
            xml.extend(child.xml())
        return xml
class mrow(math):
    """Horizontal row of sub-expressions; opening tag starts a new line."""

    def xml_start(self):
        tag = self.__class__.__name__
        return ['\n<{0}>'.format(tag)]
class mtable(math):
    """Table container (displayed math and "matrix" environments)."""

    def xml_start(self):
        # Like mrow, emit the opening tag on its own output line.
        return ['\n<' + self.__class__.__name__ + '>']
class mtr(mrow): pass   # table row; serialized exactly like mrow
class mtd(mrow): pass   # table cell; serialized exactly like mrow
class mx(math):
    """Common base for token elements (mo, mi, mn): text leaves.

    Token elements carry character data instead of child nodes.
    """
    nchildren = 0  # leaves never accept children

    def __init__(self, data):
        self.data = data

    def xml_body(self):
        return [self.data]
class mo(mx):
    """Operator token.

    XML-special characters are escaped on output: '<' and '>' must be
    emitted as entities or the generated document is malformed.  (The
    previous identity mapping {'<': '<', '>': '>'} was a no-op that let
    raw angle brackets leak into the XML.)
    """
    translation = {'<': '&lt;', '>': '&gt;'}

    def xml_body(self):
        return [self.translation.get(self.data, self.data)]
class mi(mx): pass   # identifier token (variables, letters)
class mn(mx): pass   # numeric token
class msub(math):
    # subscript: exactly two children (base, subscript)
    nchildren = 2
class msup(math):
    # superscript: exactly two children (base, superscript)
    nchildren = 2
class msqrt(math):
    # square root of a single child expression
    nchildren = 1
class mroot(math):
    # general root: (radicand, index)
    nchildren = 2
class mfrac(math):
    # fraction: (numerator, denominator)
    nchildren = 2
class msubsup(math):
    """Combined sub- and superscript: children are (base, sub, super).

    When built from an existing msup (input like ``x^a_b``) the two
    script children arrive swapped; ``reversed=True`` defers the swap
    until serialization time.
    """
    nchildren = 3

    def __init__(self, children=None, reversed=False):
        self.reversed = reversed
        math.__init__(self, children)

    def xml(self):
        if self.reversed:
            # Swap the two script children exactly once.
            second, third = self.children[1], self.children[2]
            self.children[1] = third
            self.children[2] = second
            self.reversed = False
        return math.xml(self)
class mfenced(math):
    """Bracketed group produced by ``\\left ... \\right``.

    ``closepar`` is assigned later, when the matching ``\\right`` is
    parsed (see handle_keyword).
    """
    # LaTeX delimiter token -> literal character for the open=/close=
    # attributes; '.' is the invisible delimiter and maps to ''.
    translation = {'\\{': '{', '\\langle': '\u2329',
                   '\\}': '}', '\\rangle': '\u232A',
                   '.': ''}

    def __init__(self, par):
        self.openpar = par
        math.__init__(self)

    def xml_start(self):
        left = self.translation.get(self.openpar, self.openpar)
        right = self.translation.get(self.closepar, self.closepar)
        return ['<mfenced open="%s" close="%s">' % (left, right)]
class mspace(math):
    # whitespace element (\ and \,); carries no children
    nchildren = 0
class mstyle(math):
    """Style wrapper: emits ``<mstyle attr="value" ...>`` around its
    children.  Keyword arguments become XML attributes; ``nchildren``
    optionally limits how many children the wrapper accepts."""

    def __init__(self, children=None, nchildren=None, **kwargs):
        if nchildren is not None:
            self.nchildren = nchildren
        math.__init__(self, children)
        self.attrs = kwargs

    def xml_start(self):
        # Prefix each attribute with exactly one space.  The previous
        # version only had a trailing space baked into '<mstyle ', so
        # two or more attributes were concatenated back-to-back without
        # a separator, producing malformed XML.
        return (['<mstyle'] +
                [' %s="%s"' % item for item in self.attrs.items()] +
                ['>'])
class mover(math):
    """Over-script element (accents and limits above big operators)."""
    nchildren = 2

    def __init__(self, children=None, reversed=False):
        # reversed=True: children were supplied as (accent, base) and
        # must be flipped to (base, accent) before serialization.
        self.reversed = reversed
        math.__init__(self, children)

    def xml(self):
        if self.reversed:
            self.children = self.children[::-1]
            self.reversed = False
        return math.xml(self)
class munder(math):
    # under-script element (limits below big operators): (base, underscript)
    nchildren = 2
class munderover(math):
    """Combined under- and over-script: children are (base, under, over).

    The redundant ``__init__`` that merely forwarded ``children`` to
    ``math.__init__`` was removed; the inherited constructor is
    identical (and additionally accepts the optional ``inline`` flag).
    """
    nchildren = 3
class mtext(math):
    """Literal text element (``\\text{...}``): stores raw text, no children."""
    nchildren = 0

    def __init__(self, text):
        self.text = text

    def xml_body(self):
        body = [self.text]
        return body
def parse_latex_math(string, inline=True):
    """parse_latex_math(string [,inline]) -> MathML-tree

    Returns a MathML-tree parsed from string.  inline=True is for
    inline math and inline=False is for displayed math.

    tree is the whole tree and node is the current element."""
    # Normalize white-space:
    string = ' '.join(string.split())
    if inline:
        node = mrow()
        tree = math(node, inline=True)
    else:
        # Displayed math is wrapped in a one-row/one-cell table so that
        # '\\' and '&' can later start new rows/cells.
        node = mtd()
        tree = math(mtable(mtr(node)), inline=False)
    while len(string) > 0:
        n = len(string)
        c = string[0]
        skip = 1  # number of characters consumed
        # One character of look-ahead ('' at end of input).
        if n > 1:
            c2 = string[1]
        else:
            c2 = ''
##        print n, string, c, c2, node.__class__.__name__
        if c == ' ':
            pass  # inter-token whitespace is insignificant
        elif c == '\\':
            if c2 in '{}':
                # Escaped brace: a literal operator, not grouping.
                node = node.append(mo(c2))
                skip = 2
            elif c2 == ' ':
                node = node.append(mspace())
                skip = 2
            elif c2 == ',': # TODO: small space
                node = node.append(mspace())
                skip = 2
            elif c2.isalpha():
                # We have a LaTeX-name:
                i = 2
                while i < n and string[i].isalpha():
                    i += 1
                name = string[1:i]
                node, skip = handle_keyword(name, node, string[i:])
                skip += i
            elif c2 == '\\':
                # End of a row:
                entry = mtd()
                row = mtr(entry)
                node.close().close().append(row)
                node = entry
                skip = 2
            else:
                raise SyntaxError(r'Syntax error: "%s%s"' % (c, c2))
        elif c.isalpha():
            node = node.append(mi(c))
        elif c.isdigit():
            node = node.append(mn(c))
        elif c in "+-*/=()[]|<>,.!?':;@":
            node = node.append(mo(c))
        elif c == '_':
            # Subscript: re-attach the previous element as the base.
            child = node.delete_child()
            if isinstance(child, msup):
                # base^sup_sub: merge into msubsup; scripts arrive in
                # reverse order, fixed at serialization time.
                sub = msubsup(child.children, reversed=True)
            elif isinstance(child, mo) and child.data in sumintprod:
                # Big operators take limits below, not subscripts.
                sub = munder(child)
            else:
                sub = msub(child)
            node.append(sub)
            node = sub
        elif c == '^':
            # Superscript: mirror of the '_' case above.
            child = node.delete_child()
            if isinstance(child, msub):
                sup = msubsup(child.children)
            elif isinstance(child, mo) and child.data in sumintprod:
                sup = mover(child)
            elif (isinstance(child, munder) and
                  child.children[0].data in sumintprod):
                # Operator already has a lower limit: upgrade to
                # munderover.
                sup = munderover(child.children)
            else:
                sup = msup(child)
            node.append(sup)
            node = sup
        elif c == '{':
            row = mrow()
            node.append(row)
            node = row
        elif c == '}':
            node = node.close()
        elif c == '&':
            # Start the next cell of the current table row.
            entry = mtd()
            node.close().append(entry)
            node = entry
        else:
            raise SyntaxError(r'Illegal character: "%s"' % c)
        string = string[skip:]
    return tree
def handle_keyword(name, node, string):
    """Dispatch the LaTeX command ``name``.

    ``string`` is the input following the command name.  Returns
    ``(node, skip)``: the new current node and how many characters of
    ``string`` were consumed (on top of the command name itself).
    """
    skip = 0
    if len(string) > 0 and string[0] == ' ':
        # A single space terminating the command name is part of it.
        string = string[1:]
        skip = 1
    if name == 'begin':
        # Only the "matrix" environment is supported.
        if not string.startswith('{matrix}'):
            raise SyntaxError('Environment not supported! '
                              'Supported environment: "matrix".')
        skip += 8
        entry = mtd()
        table = mtable(mtr(entry))
        node.append(table)
        node = entry
    elif name == 'end':
        if not string.startswith('{matrix}'):
            raise SyntaxError(r'Expected "\end{matrix}"!')
        skip += 8
        # Climb out of cell, row, and table.
        node = node.close().close().close()
    elif name in ('text', 'mathrm'):
        if string[0] != '{':
            raise SyntaxError(r'Expected "\text{...}"!')
        i = string.find('}')
        if i == -1:
            raise SyntaxError(r'Expected "\text{...}"!')
        node = node.append(mtext(string[1:i]))
        skip += i + 1
    elif name == 'sqrt':
        sqrt = msqrt()
        node.append(sqrt)
        node = sqrt
    elif name == 'frac':
        frac = mfrac()
        node.append(frac)
        node = frac
    elif name == 'left':
        # for/else: 'break' means one of the delimiters matched.
        for par in ['(', '[', '|', '\\{', '\\langle', '.']:
            if string.startswith(par):
                break
        else:
            raise SyntaxError('Missing left-brace!')
        fenced = mfenced(par)
        node.append(fenced)
        row = mrow()
        fenced.append(row)
        node = row
        skip += len(par)
    elif name == 'right':
        for par in [')', ']', '|', '\\}', '\\rangle', '.']:
            if string.startswith(par):
                break
        else:
            raise SyntaxError('Missing right-brace!')
        node = node.close()
        # Record the closing delimiter; consumed by mfenced.xml_start().
        node.closepar = par
        node = node.close()
        skip += len(par)
    elif name == 'not':
        for operator in negatables:
            if string.startswith(operator):
                break
        else:
            raise SyntaxError(r'Expected something to negate: "\not ..."!')
        node = node.append(mo(negatables[operator]))
        skip += len(operator)
    elif name == 'mathbf':
        style = mstyle(nchildren=1, fontweight='bold')
        node.append(style)
        node = style
    elif name == 'mathbb':
        # NOTE(review): indexes string[1]/string[2] without a length
        # check, so truncated input like "\mathbb{" raises IndexError
        # rather than SyntaxError -- confirm this is acceptable.
        if string[0] != '{' or not string[1].isupper() or string[2] != '}':
            raise SyntaxError(r'Expected something like "\mathbb{A}"!')
        node = node.append(mi(mathbb[string[1]]))
        skip += 3
    elif name in ('mathscr', 'mathcal'):
        if string[0] != '{' or string[2] != '}':
            raise SyntaxError(r'Expected something like "\mathscr{A}"!')
        node = node.append(mi(mathscr[string[1]]))
        skip += 3
    elif name == 'colon': # "normal" colon, not binary operator
        node = node.append(mo(':')) # TODO: add ``lspace="0pt"``
    elif name in Greek: # Greek capitals (upright in "TeX style")
        node = node.append(mo(Greek[name]))
        # TODO: "ISO style" sets them italic. Could we use a class argument
        # to enable styling via CSS?
    elif name in letters:
        node = node.append(mi(letters[name]))
    elif name in special:
        node = node.append(mo(special[name]))
    elif name in functions:
        node = node.append(mo(name))
    elif name in over:
        # Accent: the glyph is appended before the base expression, so
        # mover must swap its children at serialization (reversed=True).
        ovr = mover(mo(over[name]), reversed=True)
        node.append(ovr)
        node = ovr
    else:
        raise SyntaxError('Unknown LaTeX command: ' + name)
    return node, skip
| apache-2.0 |
xlzdew/seleniumN | py/selenium/webdriver/ie/service.py | 17 | 3917 | #!/usr/bin/python
#
# Copyright 2012 Webdriver_name committers
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from subprocess import PIPE
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
class Service(object):
    """
    Object that manages the starting and stopping of the IEDriver.
    """

    def __init__(self, executable_path, port=0, host=None, log_level=None, log_file=None):
        """
        Creates a new instance of the Service.

        :Args:
         - executable_path : Path to the IEDriver
         - port : Port the service is running on (0 picks a free port)
         - host : IP address the service port is bound
         - log_level : Level of logging of service, may be "FATAL", "ERROR",
           "WARN", "INFO", "DEBUG", "TRACE". Default is "FATAL".
         - log_file : Target of logging of service, may be "stdout",
           "stderr" or file path. Default is "stdout".
        """
        self.port = port
        self.path = executable_path
        if self.port == 0:
            self.port = utils.free_port()
        self.host = host
        self.log_level = log_level
        self.log_file = log_file
        # Initialize the process handle eagerly so that stop() is safe to
        # call even if start() never ran (previously this attribute only
        # existed after start(), making stop() raise AttributeError).
        self.process = None

    def start(self):
        """
        Starts the IEDriver Service.

        :Exceptions:
         - WebDriverException : Raised either when it can't start the service
           or when it can't connect to the service
        """
        try:
            cmd = [self.path, "--port=%d" % self.port]
            if self.host is not None:
                cmd.append("--host=%s" % self.host)
            if self.log_level is not None:
                cmd.append("--log-level=%s" % self.log_level)
            if self.log_file is not None:
                cmd.append("--log-file=%s" % self.log_file)
            self.process = subprocess.Popen(cmd,
                                            stdout=PIPE, stderr=PIPE)
        except TypeError:
            # Bad arguments (e.g. executable_path is None) are programmer
            # errors; let them propagate unchanged.
            raise
        except Exception:
            # Anything else (typically OSError: no such file) means the
            # driver binary could not be launched.  Was a bare ``except:``,
            # which also swallowed KeyboardInterrupt/SystemExit.
            raise WebDriverException(
                "IEDriver executable needs to be available in the path. \
                Please download from http://selenium-release.storage.googleapis.com/index.html\
                and read up at http://code.google.com/p/selenium/wiki/InternetExplorerDriver")
        # Poll for up to 30 seconds until the service accepts connections.
        count = 0
        while not utils.is_url_connectable(self.port):
            count += 1
            time.sleep(1)
            if count == 30:
                raise WebDriverException("Can not connect to the IEDriver")

    def stop(self):
        """
        Tells the IEDriver to stop and cleans up the process.
        """
        # If it's dead already (or never started), don't worry.
        if self.process is None:
            return
        # Ask the server to shut itself down over HTTP (py3/py2 urllib).
        try:
            from urllib import request as url_request
        except ImportError:
            import urllib2 as url_request

        url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
        count = 0
        while utils.is_connectable(self.port):
            if count == 30:
                break
            count += 1
            time.sleep(1)
        # Kill the process if it is still around.
        try:
            if self.process:
                self.process.kill()
                self.process.wait()
        except OSError:
            # Was ``except WindowsError``, which is a NameError on
            # non-Windows Python 3; OSError covers both platforms
            # (kill may not be available under Windows environment).
            pass
| apache-2.0 |
ilpianista/ansible | lib/ansible/module_utils/facts/network/base.py | 88 | 2400 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
class Network:
    """
    Generic base for per-platform network fact gathering.

    Platform implementations subclass this and are expected to provide:

      - interfaces (a list of interface names)
      - interface_<name> dictionary of ipv4, ipv6, and mac address
        information.

    All subclasses MUST define ``platform``.
    """
    platform = 'Generic'

    # FIXME: remove load_on_init when we can
    def __init__(self, module, load_on_init=False):
        self.module = module

    # TODO: more or less abstract/NotImplemented
    def populate(self, collected_facts=None):
        # The generic implementation knows nothing; platforms override.
        return {}
class NetworkCollector(BaseFactCollector):
    """Fact collector that delegates to a platform-specific Network class."""
    # MAYBE: we could try to build this based on the arch specific
    # implementation of Network() or its kin
    name = 'network'
    _fact_class = Network
    _fact_ids = set(['interfaces',
                     'default_ipv4',
                     'default_ipv6',
                     'all_ipv4_addresses',
                     'all_ipv6_addresses'])

    IPV6_SCOPE = {'0': 'global',
                  '10': 'host',
                  '20': 'link',
                  '40': 'admin',
                  '50': 'site',
                  '80': 'organization'}

    def collect(self, module=None, collected_facts=None):
        # Without a module there is nothing to collect against.
        if not module:
            return {}
        facts_source = collected_facts or {}
        # Network munges cached facts by side effect, so hand over the
        # mapping explicitly.
        network = self._fact_class(module)
        return network.populate(collected_facts=facts_source)
| gpl-3.0 |
Gillu13/scipy | scipy/fftpack/tests/test_pseudo_diffs.py | 33 | 13624 | #!/usr/bin/env python
# Created by Pearu Peterson, September 2002
from __future__ import division, print_function, absolute_import
__usage__ = """
Build fftpack:
python setup_fftpack.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.fftpack.test(<level>)'
Run tests if fftpack is not installed:
python tests/test_pseudo_diffs.py [<level>]
"""
from numpy.testing import (TestCase, assert_equal, assert_almost_equal,
assert_array_almost_equal, run_module_suite)
from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert,
ihilbert, shift, fftfreq, cs_diff, sc_diff,
ss_diff, cc_diff)
import numpy as np
from numpy import arange, sin, cos, pi, exp, tanh, sum, sign
from numpy.random import random
def direct_diff(x, k=1, period=None):
    """Reference implementation: k-th pseudo-derivative of ``x`` applied
    directly in Fourier space (the test oracle for fftpack.diff)."""
    spectrum = fft(x)
    npts = len(spectrum)
    if period is None:
        period = 2*pi
    multiplier = fftfreq(npts)*2j*pi/period*npts
    if k < 0:
        multiplier = 1 / multiplier**k
        # Drop the mean mode (it is not invertible).
        multiplier[0] = 0.0
    else:
        multiplier = multiplier**k
    if npts > 2000:
        # Suppress mid-band modes for very long signals, matching the
        # behavior diff() is tested against.
        multiplier[250:npts-250] = 0.0
    return ifft(multiplier*spectrum).real
def direct_tilbert(x, h=1, period=None):
    """Reference Tilbert transform: multiply the spectrum by
    ``1j/tanh(h*w)`` (test oracle for fftpack.tilbert)."""
    spectrum = fft(x)
    npts = len(spectrum)
    if period is None:
        period = 2*pi
    w = fftfreq(npts)*h*2*pi/period*npts
    # Avoid 0/0 at the mean mode, then force its weight to exactly 0j.
    w[0] = 1
    weights = 1j/tanh(w)
    weights[0] = 0j
    return ifft(weights*spectrum)
def direct_itilbert(x, h=1, period=None):
    """Reference inverse Tilbert transform: multiply the spectrum by
    ``-1j*tanh(h*w)`` (test oracle for fftpack.itilbert)."""
    spectrum = fft(x)
    npts = len(spectrum)
    if period is None:
        period = 2*pi
    w = fftfreq(npts)*h*2*pi/period*npts
    return ifft(-1j*tanh(w)*spectrum)
def direct_hilbert(x):
    """Reference Hilbert transform: multiply the spectrum by
    ``1j*sign(freq)`` (test oracle for fftpack.hilbert)."""
    spectrum = fft(x)
    npts = len(spectrum)
    freqs = fftfreq(npts)*npts
    return ifft(1j*sign(freqs)*spectrum)
def direct_ihilbert(x):
    # The inverse Hilbert transform is simply the negated forward one.
    return -direct_hilbert(x)
def direct_shift(x, a, period=None):
    """Reference periodic shift x(t) -> x(t+a), realized as a phase ramp
    in Fourier space (test oracle for fftpack.shift)."""
    npts = len(x)
    if period is None:
        phase = fftfreq(npts)*1j*npts
    else:
        phase = fftfreq(npts)*2j*pi/period*npts
    return ifft(fft(x)*exp(phase*a)).real
class TestDiff(TestCase):
    """Tests for fftpack.diff against the direct spectral reference."""

    def test_definition(self):
        # diff must agree with direct_diff for assorted sizes and orders.
        for n in [16,17,64,127,32]:
            x = arange(n)*2*pi/n
            assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x)))
            assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2))
            assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3))
            assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4))
            assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5))
            assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3))
            assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4))
            assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x)))
            assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2))
            assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3))
            assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4))
            assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x)))
            assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8)))
            assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8)))
            for k in range(5):
                assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k))
                assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k))

    def test_period(self):
        # A non-default period rescales the derivative accordingly.
        for n in [17,64]:
            x = arange(n)/float(n)
            assert_array_almost_equal(diff(sin(2*pi*x),period=1),
                                      2*pi*cos(2*pi*x))
            assert_array_almost_equal(diff(sin(2*pi*x),3,period=1),
                                      -(2*pi)**3*cos(2*pi*x))

    def test_sin(self):
        # Analytic derivatives of sine/cosine.
        for n in [32,64,77]:
            x = arange(n)*2*pi/n
            assert_array_almost_equal(diff(sin(x)),cos(x))
            assert_array_almost_equal(diff(cos(x)),-sin(x))
            assert_array_almost_equal(diff(sin(x),2),-sin(x))
            assert_array_almost_equal(diff(sin(x),4),sin(x))
            assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x))
            assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x)))

    def test_expr(self):
        # Composite expression with hand-computed first/second derivatives.
        for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]:
            x = arange(n)*2*pi/n
            f = sin(x)*cos(4*x)+exp(sin(3*x))
            df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
            ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
                  - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
            d1 = diff(f)
            assert_array_almost_equal(d1,df)
            assert_array_almost_equal(diff(df),ddf)
            assert_array_almost_equal(diff(f,2),ddf)
            assert_array_almost_equal(diff(ddf,-1),df)

    def test_expr_large(self):
        # Same expression at large sizes (exercises the mid-band cutoff).
        for n in [2048,4096]:
            x = arange(n)*2*pi/n
            f = sin(x)*cos(4*x)+exp(sin(3*x))
            df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
            ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
                  - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
            assert_array_almost_equal(diff(f),df)
            assert_array_almost_equal(diff(df),ddf)
            assert_array_almost_equal(diff(ddf,-1),df)
            assert_array_almost_equal(diff(f,2),ddf)

    def test_int(self):
        # Negative order = antiderivative.
        n = 64
        x = arange(n)*2*pi/n
        assert_array_almost_equal(diff(sin(x),-1),-cos(x))
        assert_array_almost_equal(diff(sin(x),-2),-sin(x))
        assert_array_almost_equal(diff(sin(x),-4),sin(x))
        assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x))

    def test_random_even(self):
        # diff(diff(f,k),-k) must round-trip zero-mean even-length data.
        for k in [0,2,4,6]:
            for n in [60,32,64,56,55]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                # zeroing Nyquist mode:
                f = diff(diff(f,1),-1)
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(diff(diff(f,k),-k),f)
                assert_array_almost_equal(diff(diff(f,-k),k),f)

    def test_random_odd(self):
        # Odd lengths have no Nyquist mode, so all orders round-trip.
        for k in [0,1,2,3,4,5,6]:
            for n in [33,65,55]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(diff(diff(f,k),-k),f)
                assert_array_almost_equal(diff(diff(f,-k),k),f)

    def test_zero_nyquist(self):
        # Explicitly zero the Nyquist mode before round-tripping.
        for k in [0,1,2,3,4,5,6]:
            for n in [32,33,64,56,55]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                # zeroing Nyquist mode:
                f = diff(diff(f,1),-1)
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(diff(diff(f,k),-k),f)
                assert_array_almost_equal(diff(diff(f,-k),k),f)
class TestTilbert(TestCase):
    """Tests for fftpack.tilbert against the direct reference and its inverse."""

    def test_definition(self):
        for h in [0.1,0.5,1,5.5,10]:
            for n in [16,17,64,127]:
                x = arange(n)*2*pi/n
                y = tilbert(sin(x),h)
                y1 = direct_tilbert(sin(x),h)
                assert_array_almost_equal(y,y1)
                assert_array_almost_equal(tilbert(sin(x),h),
                                          direct_tilbert(sin(x),h))
                assert_array_almost_equal(tilbert(sin(2*x),h),
                                          direct_tilbert(sin(2*x),h))

    def test_random_even(self):
        # Round-trip through the direct transforms on zero-mean data.
        for h in [0.1,0.5,1,5.5,10]:
            for n in [32,64,56]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f)

    def test_random_odd(self):
        # Round-trip through the fftpack transforms on zero-mean data.
        for h in [0.1,0.5,1,5.5,10]:
            for n in [33,65,55]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(itilbert(tilbert(f,h),h),f)
                assert_array_almost_equal(tilbert(itilbert(f,h),h),f)
class TestITilbert(TestCase):
    """Tests for fftpack.itilbert against the direct reference."""

    def test_definition(self):
        for h in [0.1,0.5,1,5.5,10]:
            for n in [16,17,64,127]:
                x = arange(n)*2*pi/n
                y = itilbert(sin(x),h)
                y1 = direct_itilbert(sin(x),h)
                assert_array_almost_equal(y,y1)
                assert_array_almost_equal(itilbert(sin(x),h),
                                          direct_itilbert(sin(x),h))
                assert_array_almost_equal(itilbert(sin(2*x),h),
                                          direct_itilbert(sin(2*x),h))
class TestHilbert(TestCase):
    """Tests for fftpack.hilbert: direct reference, tilbert limit, round-trips."""

    def test_definition(self):
        for n in [16,17,64,127]:
            x = arange(n)*2*pi/n
            y = hilbert(sin(x))
            y1 = direct_hilbert(sin(x))
            assert_array_almost_equal(y,y1)
            assert_array_almost_equal(hilbert(sin(2*x)),
                                      direct_hilbert(sin(2*x)))

    def test_tilbert_relation(self):
        # For large h, tilbert approaches hilbert (tanh -> 1).
        for n in [16,17,64,127]:
            x = arange(n)*2*pi/n
            f = sin(x)+cos(2*x)*sin(x)
            y = hilbert(f)
            y1 = direct_hilbert(f)
            assert_array_almost_equal(y,y1)
            y2 = tilbert(f,h=10)
            assert_array_almost_equal(y,y2)

    def test_random_odd(self):
        for n in [33,65,55]:
            f = random((n,))
            af = sum(f,axis=0)/n
            f = f-af
            assert_almost_equal(sum(f,axis=0),0.0)
            assert_array_almost_equal(ihilbert(hilbert(f)),f)
            assert_array_almost_equal(hilbert(ihilbert(f)),f)

    def test_random_even(self):
        for n in [32,64,56]:
            f = random((n,))
            af = sum(f,axis=0)/n
            f = f-af
            # zeroing Nyquist mode:
            f = diff(diff(f,1),-1)
            assert_almost_equal(sum(f,axis=0),0.0)
            assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)
            assert_array_almost_equal(hilbert(ihilbert(f)),f)
class TestIHilbert(TestCase):
    """Tests for fftpack.ihilbert against the direct reference."""

    def test_definition(self):
        for n in [16,17,64,127]:
            x = arange(n)*2*pi/n
            y = ihilbert(sin(x))
            y1 = direct_ihilbert(sin(x))
            assert_array_almost_equal(y,y1)
            assert_array_almost_equal(ihilbert(sin(2*x)),
                                      direct_ihilbert(sin(2*x)))

    def test_itilbert_relation(self):
        # For large h, itilbert approaches ihilbert.
        for n in [16,17,64,127]:
            x = arange(n)*2*pi/n
            f = sin(x)+cos(2*x)*sin(x)
            y = ihilbert(f)
            y1 = direct_ihilbert(f)
            assert_array_almost_equal(y,y1)
            y2 = itilbert(f,h=10)
            assert_array_almost_equal(y,y2)
class TestShift(TestCase):
    """Tests for fftpack.shift against the direct reference and analytic shifts."""

    def test_definition(self):
        for n in [18,17,64,127,32,2048,256]:
            x = arange(n)*2*pi/n
            for a in [0.1,3]:
                assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a))
                assert_array_almost_equal(shift(sin(x),a),sin(x+a))
                assert_array_almost_equal(shift(cos(x),a),cos(x+a))
                assert_array_almost_equal(shift(cos(2*x)+sin(x),a),
                                          cos(2*(x+a))+sin(x+a))
                assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a)))
            # Special shifts: full period, half period, quarter period.
            assert_array_almost_equal(shift(sin(x),2*pi),sin(x))
            assert_array_almost_equal(shift(sin(x),pi),-sin(x))
            assert_array_almost_equal(shift(sin(x),pi/2),cos(x))
class TestOverwrite(object):
    """Check that the pseudo-diff routines do not overwrite their input."""
    real_dtypes = [np.float32, np.float64]
    dtypes = real_dtypes + [np.complex64, np.complex128]

    def _check(self, x, routine, *args, **kwargs):
        # Run the routine on a copy and verify the copy is untouched.
        x2 = x.copy()
        routine(x2, *args, **kwargs)
        sig = routine.__name__
        if args:
            sig += repr(args)
        if kwargs:
            sig += repr(kwargs)
        assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)

    def _check_1d(self, routine, dtype, shape, *args, **kwargs):
        # Deterministic random data of the requested dtype/shape.
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        self._check(data, routine, *args, **kwargs)

    def test_diff(self):
        for dtype in self.dtypes:
            self._check_1d(diff, dtype, (16,))

    def test_tilbert(self):
        for dtype in self.dtypes:
            self._check_1d(tilbert, dtype, (16,), 1.6)

    def test_itilbert(self):
        for dtype in self.dtypes:
            self._check_1d(itilbert, dtype, (16,), 1.6)

    def test_hilbert(self):
        for dtype in self.dtypes:
            self._check_1d(hilbert, dtype, (16,))

    def test_cs_diff(self):
        for dtype in self.dtypes:
            self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0)

    def test_sc_diff(self):
        for dtype in self.dtypes:
            self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0)

    def test_ss_diff(self):
        for dtype in self.dtypes:
            self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0)

    def test_cc_diff(self):
        for dtype in self.dtypes:
            self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0)

    def test_shift(self):
        for dtype in self.dtypes:
            self._check_1d(shift, dtype, (16,), 1.0)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
lokirius/python-for-android | python-modules/twisted/twisted/web/test/test_httpauth.py | 49 | 21718 | # Copyright (c) 2009-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web._auth}.
"""
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet.error import ConnectionDone
from twisted.internet.address import IPv4Address
from twisted.cred import error, portal
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.checkers import ANONYMOUS, AllowAnonymousAccess
from twisted.cred.credentials import IUsernamePassword
from twisted.web.iweb import ICredentialFactory
from twisted.web.resource import IResource, Resource, getChildForRequest
from twisted.web._auth import basic, digest
from twisted.web._auth.wrapper import HTTPAuthSessionWrapper, UnauthorizedResource
from twisted.web._auth.basic import BasicCredentialFactory
from twisted.web.server import NOT_DONE_YET
from twisted.web.static import Data
from twisted.web.test.test_web import DummyRequest
def b64encode(s):
    # NOTE: relies on the Python 2 'base64' str codec (this module is
    # Python 2 era twisted); not valid on Python 3.
    return s.encode('base64').strip()
class BasicAuthTestsMixin:
    """
    L{TestCase} mixin class which defines a number of tests for
    L{basic.BasicCredentialFactory}.  Because this mixin defines C{setUp}, it
    must be inherited before L{TestCase}.
    """
    def setUp(self):
        # Fixed realm/credentials shared by every test in the mixin.
        self.request = self.makeRequest()
        self.realm = 'foo'
        self.username = 'dreid'
        self.password = 'S3CuR1Ty'
        self.credentialFactory = basic.BasicCredentialFactory(self.realm)

    def makeRequest(self, method='GET', clientAddress=None):
        """
        Create a request object to be passed to
        L{basic.BasicCredentialFactory.decode} along with a response value.
        Override this in a subclass.
        """
        raise NotImplementedError("%r did not implement makeRequest" % (
                self.__class__,))

    def test_interface(self):
        """
        L{BasicCredentialFactory} implements L{ICredentialFactory}.
        """
        self.assertTrue(
            verifyObject(ICredentialFactory, self.credentialFactory))

    def test_usernamePassword(self):
        """
        L{basic.BasicCredentialFactory.decode} turns a base64-encoded response
        into a L{UsernamePassword} object with a password which reflects the
        one which was encoded in the response.
        """
        response = b64encode('%s:%s' % (self.username, self.password))
        creds = self.credentialFactory.decode(response, self.request)
        self.assertTrue(IUsernamePassword.providedBy(creds))
        self.assertTrue(creds.checkPassword(self.password))
        self.assertFalse(creds.checkPassword(self.password + 'wrong'))

    def test_incorrectPadding(self):
        """
        L{basic.BasicCredentialFactory.decode} decodes a base64-encoded
        response with incorrect padding.
        """
        response = b64encode('%s:%s' % (self.username, self.password))
        response = response.strip('=')
        creds = self.credentialFactory.decode(response, self.request)
        self.assertTrue(verifyObject(IUsernamePassword, creds))
        self.assertTrue(creds.checkPassword(self.password))

    def test_invalidEncoding(self):
        """
        L{basic.BasicCredentialFactory.decode} raises L{LoginFailed} if passed
        a response which is not base64-encoded.
        """
        response = 'x' # one byte cannot be valid base64 text
        self.assertRaises(
            error.LoginFailed,
            self.credentialFactory.decode, response, self.makeRequest())

    def test_invalidCredentials(self):
        """
        L{basic.BasicCredentialFactory.decode} raises L{LoginFailed} when
        passed a response which is not valid base64-encoded text.
        """
        # Valid base64 but no ':' separator inside the decoded text.
        response = b64encode('123abc+/')
        self.assertRaises(
            error.LoginFailed,
            self.credentialFactory.decode,
            response, self.makeRequest())
class RequestMixin:
    """
    Mixin supplying a C{makeRequest} that builds L{DummyRequest} objects.
    """
    def makeRequest(self, method='GET', clientAddress=None):
        """
        Create a L{DummyRequest} (change me to create a
        L{twisted.web.http.Request} instead).
        """
        created = DummyRequest('/')
        created.method = method
        created.client = clientAddress
        return created
class BasicAuthTestCase(RequestMixin, BasicAuthTestsMixin, unittest.TestCase):
    """
    Run the basic-authentication tests from L{BasicAuthTestsMixin} against
    requests produced by L{RequestMixin}.
    """
class DigestAuthTestCase(RequestMixin, unittest.TestCase):
    """
    Digest authentication tests which use L{twisted.web.http.Request}.
    """
    def setUp(self):
        """
        Create a DigestCredentialFactory for testing
        """
        self.realm = "test realm"
        self.algorithm = "md5"
        self.credentialFactory = digest.DigestCredentialFactory(
            self.algorithm, self.realm)
        self.request = self.makeRequest()

    def test_decode(self):
        """
        L{digest.DigestCredentialFactory.decode} calls the C{decode} method on
        L{twisted.cred.digest.DigestCredentialFactory} with the HTTP method and
        host of the request.
        """
        host = '169.254.0.1'
        method = 'GET'
        done = [False]
        response = object()
        def check(_response, _method, _host):
            self.assertEqual(response, _response)
            self.assertEqual(method, _method)
            self.assertEqual(host, _host)
            done[0] = True
        self.patch(self.credentialFactory.digest, 'decode', check)
        req = self.makeRequest(method, IPv4Address('TCP', host, 81))
        self.credentialFactory.decode(response, req)
        self.assertTrue(done[0])

    def test_interface(self):
        """
        L{DigestCredentialFactory} implements L{ICredentialFactory}.
        """
        self.assertTrue(
            verifyObject(ICredentialFactory, self.credentialFactory))

    def test_getChallenge(self):
        """
        The challenge issued by L{DigestCredentialFactory.getChallenge} must
        include C{'qop'}, C{'realm'}, C{'algorithm'}, C{'nonce'}, and
        C{'opaque'} keys.  The values for the C{'realm'} and C{'algorithm'}
        keys must match the values supplied to the factory's initializer.
        None of the values may have newlines in them.
        """
        challenge = self.credentialFactory.getChallenge(self.request)
        # Use assertEqual, not the deprecated assertEquals alias, matching
        # test_getChallengeWithoutClientIP below.
        self.assertEqual(challenge['qop'], 'auth')
        self.assertEqual(challenge['realm'], 'test realm')
        self.assertEqual(challenge['algorithm'], 'md5')
        self.assertIn('nonce', challenge)
        self.assertIn('opaque', challenge)
        for v in challenge.values():
            self.assertNotIn('\n', v)

    def test_getChallengeWithoutClientIP(self):
        """
        L{DigestCredentialFactory.getChallenge} can issue a challenge even if
        the L{Request} it is passed returns C{None} from C{getClientIP}.
        """
        request = self.makeRequest('GET', None)
        challenge = self.credentialFactory.getChallenge(request)
        self.assertEqual(challenge['qop'], 'auth')
        self.assertEqual(challenge['realm'], 'test realm')
        self.assertEqual(challenge['algorithm'], 'md5')
        self.assertIn('nonce', challenge)
        self.assertIn('opaque', challenge)
class UnauthorizedResourceTests(unittest.TestCase):
    """
    Tests for L{UnauthorizedResource}.
    """
    def test_getChildWithDefault(self):
        """
        An L{UnauthorizedResource} is every child of itself.
        """
        resource = UnauthorizedResource([])
        for segment in ["foo", "bar"]:
            self.assertIdentical(
                resource.getChildWithDefault(segment, None), resource)

    def test_render(self):
        """
        L{UnauthorizedResource} renders with a 401 response code and a
        I{WWW-Authenticate} header and puts a simple unauthorized message
        into the response body.
        """
        resource = UnauthorizedResource([
            BasicCredentialFactory('example.com')])
        request = DummyRequest([''])
        request.render(resource)
        self.assertEqual(request.responseCode, 401)
        header = request.responseHeaders.getRawHeaders('www-authenticate')
        self.assertEqual(header, ['basic realm="example.com"'])
        self.assertEqual(request.written, ['Unauthorized'])

    def test_renderQuotesRealm(self):
        """
        The realm value included in the I{WWW-Authenticate} header set in
        the response when L{UnauthorizedResource} is rendered has quotes
        and backslashes escaped.
        """
        resource = UnauthorizedResource([
            BasicCredentialFactory('example\\"foo')])
        request = DummyRequest([''])
        request.render(resource)
        header = request.responseHeaders.getRawHeaders('www-authenticate')
        self.assertEqual(header, ['basic realm="example\\\\\\"foo"'])
class Realm(object):
    """
    A simple L{IRealm} implementation which gives out L{WebAvatar} for any
    avatarId.

    @type loggedIn: C{int}
    @ivar loggedIn: The number of times C{requestAvatar} has been invoked for
        L{IResource}.

    @type loggedOut: C{int}
    @ivar loggedOut: The number of times the logout callback has been invoked.
    """
    implements(portal.IRealm)

    def __init__(self, avatarFactory):
        self.loggedIn = 0
        self.loggedOut = 0
        self.avatarFactory = avatarFactory

    def requestAvatar(self, avatarId, mind, *interfaces):
        # Only web resources are supported; any other interface is an error.
        if IResource not in interfaces:
            raise NotImplementedError()
        self.loggedIn += 1
        return IResource, self.avatarFactory(avatarId), self.logout

    def logout(self):
        self.loggedOut += 1
class HTTPAuthHeaderTests(unittest.TestCase):
    """
    Tests for L{HTTPAuthSessionWrapper}.
    """
    makeRequest = DummyRequest

    def setUp(self):
        """
        Create a realm, portal, and L{HTTPAuthSessionWrapper} to use in the
        tests.
        """
        self.username = 'foo bar'
        self.password = 'bar baz'
        self.avatarContent = "contents of the avatar resource itself"
        self.childName = "foo-child"
        self.childContent = "contents of the foo child of the avatar"
        self.checker = InMemoryUsernamePasswordDatabaseDontUse()
        self.checker.addUser(self.username, self.password)
        self.avatar = Data(self.avatarContent, 'text/plain')
        self.avatar.putChild(
            self.childName, Data(self.childContent, 'text/plain'))
        self.avatars = {self.username: self.avatar}
        self.realm = Realm(self.avatars.get)
        self.portal = portal.Portal(self.realm, [self.checker])
        self.credentialFactories = []
        self.wrapper = HTTPAuthSessionWrapper(
            self.portal, self.credentialFactories)

    def _authorizedBasicLogin(self, request):
        """
        Add a I{basic authorization} header to the given request and then
        dispatch it, starting from C{self.wrapper} and returning the resulting
        L{IResource}.
        """
        authorization = b64encode(self.username + ':' + self.password)
        request.headers['authorization'] = 'Basic ' + authorization
        return getChildForRequest(self.wrapper, request)

    def test_getChildWithDefault(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} instance when the request does
        not have the required I{Authorization} headers.
        """
        request = self.makeRequest([self.childName])
        child = getChildForRequest(self.wrapper, request)
        d = request.notifyFinish()
        def cbFinished(result):
            self.assertEqual(request.responseCode, 401)
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def _invalidAuthorizationTest(self, response):
        """
        Create a request with the given value as the value of an
        I{Authorization} header and perform resource traversal with it,
        starting at C{self.wrapper}.  Assert that the result is a 401 response
        code.  Return a L{Deferred} which fires when this is all done.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        request.headers['authorization'] = response
        child = getChildForRequest(self.wrapper, request)
        d = request.notifyFinish()
        def cbFinished(result):
            self.assertEqual(request.responseCode, 401)
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def test_getChildWithDefaultUnauthorizedUser(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} when the request has an
        I{Authorization} header with a user which does not exist.
        """
        return self._invalidAuthorizationTest('Basic ' + b64encode('foo:bar'))

    def test_getChildWithDefaultUnauthorizedPassword(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} when the request has an
        I{Authorization} header with a user which exists and the wrong
        password.
        """
        return self._invalidAuthorizationTest(
            'Basic ' + b64encode(self.username + ':bar'))

    def test_getChildWithDefaultUnrecognizedScheme(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} when the request has an
        I{Authorization} header with an unrecognized scheme.
        """
        return self._invalidAuthorizationTest('Quux foo bar baz')

    def test_getChildWithDefaultAuthorized(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{IResource} which renders the L{IResource} avatar
        retrieved from the portal when the request has a valid I{Authorization}
        header.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        child = self._authorizedBasicLogin(request)
        d = request.notifyFinish()
        def cbFinished(ignored):
            self.assertEqual(request.written, [self.childContent])
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def test_renderAuthorized(self):
        """
        Resource traversal which terminates at an L{HTTPAuthSessionWrapper}
        and includes correct authentication headers results in the
        L{IResource} avatar (not one of its children) retrieved from the
        portal being rendered.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        # Request it exactly, not any of its children.
        request = self.makeRequest([])
        child = self._authorizedBasicLogin(request)
        d = request.notifyFinish()
        def cbFinished(ignored):
            self.assertEqual(request.written, [self.avatarContent])
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def test_getChallengeCalledWithRequest(self):
        """
        When L{HTTPAuthSessionWrapper} finds an L{ICredentialFactory} to issue
        a challenge, it calls the C{getChallenge} method with the request as an
        argument.
        """
        class DumbCredentialFactory(object):
            implements(ICredentialFactory)
            scheme = 'dumb'

            def __init__(self):
                self.requests = []

            def getChallenge(self, request):
                self.requests.append(request)
                return {}

        factory = DumbCredentialFactory()
        self.credentialFactories.append(factory)
        request = self.makeRequest([self.childName])
        child = getChildForRequest(self.wrapper, request)
        d = request.notifyFinish()
        def cbFinished(ignored):
            self.assertEqual(factory.requests, [request])
        d.addCallback(cbFinished)
        request.render(child)
        return d

    def _logoutTest(self):
        """
        Issue a request for an authentication-protected resource using valid
        credentials and then return the C{DummyRequest} instance which was
        used.

        This is a helper for tests about the behavior of the logout
        callback.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))

        class SlowerResource(Resource):
            def render(self, request):
                return NOT_DONE_YET

        self.avatar.putChild(self.childName, SlowerResource())
        request = self.makeRequest([self.childName])
        child = self._authorizedBasicLogin(request)
        request.render(child)
        self.assertEqual(self.realm.loggedOut, 0)
        return request

    def test_logout(self):
        """
        The realm's logout callback is invoked after the resource is rendered.
        """
        request = self._logoutTest()
        request.finish()
        self.assertEqual(self.realm.loggedOut, 1)

    def test_logoutOnError(self):
        """
        The realm's logout callback is also invoked if there is an error
        generating the response (for example, if the client disconnects
        early).
        """
        request = self._logoutTest()
        request.processingFailed(
            Failure(ConnectionDone("Simulated disconnect")))
        self.assertEqual(self.realm.loggedOut, 1)

    def test_decodeRaises(self):
        """
        Resource traversal which encounters an L{HTTPAuthSessionWrapper}
        results in an L{UnauthorizedResource} when the request has a I{Basic
        Authorization} header which cannot be decoded using base64.
        """
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        request.headers['authorization'] = 'Basic decode should fail'
        child = getChildForRequest(self.wrapper, request)
        self.assertIsInstance(child, UnauthorizedResource)

    def test_selectParseResponse(self):
        """
        L{HTTPAuthSessionWrapper._selectParseHeader} returns a two-tuple giving
        the L{ICredentialFactory} to use to parse the header and a string
        containing the portion of the header which remains to be parsed.
        """
        basicAuthorization = 'Basic abcdef123456'
        self.assertEqual(
            self.wrapper._selectParseHeader(basicAuthorization),
            (None, None))
        factory = BasicCredentialFactory('example.com')
        self.credentialFactories.append(factory)
        self.assertEqual(
            self.wrapper._selectParseHeader(basicAuthorization),
            (factory, 'abcdef123456'))

    def test_unexpectedDecodeError(self):
        """
        Any unexpected exception raised by the credential factory's C{decode}
        method results in a 500 response code and causes the exception to be
        logged.
        """
        class UnexpectedException(Exception):
            pass

        class BadFactory(object):
            scheme = 'bad'

            def getChallenge(self, client):
                return {}

            def decode(self, response, request):
                raise UnexpectedException()

        self.credentialFactories.append(BadFactory())
        request = self.makeRequest([self.childName])
        request.headers['authorization'] = 'Bad abc'
        child = getChildForRequest(self.wrapper, request)
        request.render(child)
        self.assertEqual(request.responseCode, 500)
        self.assertEqual(len(self.flushLoggedErrors(UnexpectedException)), 1)

    def test_unexpectedLoginError(self):
        """
        Any unexpected failure from L{Portal.login} results in a 500 response
        code and causes the failure to be logged.
        """
        class UnexpectedException(Exception):
            pass

        class BrokenChecker(object):
            credentialInterfaces = (IUsernamePassword,)

            def requestAvatarId(self, credentials):
                raise UnexpectedException()

        self.portal.registerChecker(BrokenChecker())
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        child = self._authorizedBasicLogin(request)
        request.render(child)
        self.assertEqual(request.responseCode, 500)
        self.assertEqual(len(self.flushLoggedErrors(UnexpectedException)), 1)

    def test_anonymousAccess(self):
        """
        Anonymous requests are allowed if a L{Portal} has an anonymous checker
        registered.
        """
        unprotectedContents = "contents of the unprotected child resource"
        self.avatars[ANONYMOUS] = Resource()
        self.avatars[ANONYMOUS].putChild(
            self.childName, Data(unprotectedContents, 'text/plain'))
        self.portal.registerChecker(AllowAnonymousAccess())
        self.credentialFactories.append(BasicCredentialFactory('example.com'))
        request = self.makeRequest([self.childName])
        child = getChildForRequest(self.wrapper, request)
        d = request.notifyFinish()
        def cbFinished(ignored):
            self.assertEqual(request.written, [unprotectedContents])
        d.addCallback(cbFinished)
        request.render(child)
        return d
| apache-2.0 |
ubiar/odoo | addons/hr_expense/hr_expense.py | 233 | 24907 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
def _employee_get(obj, cr, uid, context=None):
if context is None:
context = {}
ids = obj.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
class hr_expense_expense(osv.osv):

    def _amount(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: total of each sheet as the sum of its line subtotals."""
        res = {}
        for expense in self.browse(cr, uid, ids, context=context):
            total = 0.0
            for line in expense.line_ids:
                total += line.unit_amount * line.unit_quantity
            res[expense.id] = total
        return res

    def _get_expense_from_line(self, cr, uid, ids, context=None):
        """Store trigger: map changed expense line ids to their parent expense ids."""
        return [line.expense_id.id for line in self.pool.get('hr.expense.line').browse(cr, uid, ids, context=context)]

    def _get_currency(self, cr, uid, context=None):
        """Default currency: the company currency of the current user."""
        user = self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0]
        return user.company_id.currency_id.id

    _name = "hr.expense.expense"
    _inherit = ['mail.thread']
    _description = "Expense"
    _order = "id desc"
    _track = {
        'state': {
            'hr_expense.mt_expense_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'accepted',
            'hr_expense.mt_expense_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'cancelled',
            'hr_expense.mt_expense_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
        },
    }

    _columns = {
        'name': fields.char('Description', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
        'id': fields.integer('Sheet ID', readonly=True),
        'date': fields.date('Date', select=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
        'journal_id': fields.many2one('account.journal', 'Force Journal', help = "The journal used when the expense is done."),
        'employee_id': fields.many2one('hr.employee', "Employee", required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
        'user_id': fields.many2one('res.users', 'User', required=True),
        'date_confirm': fields.date('Confirmation Date', select=True, copy=False,
                                    help="Date of the confirmation of the sheet expense. It's filled when the button Confirm is pressed."),
        'date_valid': fields.date('Validation Date', select=True, copy=False,
                                  help="Date of the acceptation of the sheet expense. It's filled when the button Accept is pressed."),
        'user_valid': fields.many2one('res.users', 'Validation By', readonly=True, copy=False,
                                      states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
        'account_move_id': fields.many2one('account.move', 'Ledger Posting', copy=False),
        'line_ids': fields.one2many('hr.expense.line', 'expense_id', 'Expense Lines', copy=True,
                                    readonly=True, states={'draft':[('readonly',False)]} ),
        'note': fields.text('Note'),
        'amount': fields.function(_amount, string='Total Amount', digits_compute=dp.get_precision('Account'),
            store={
                'hr.expense.line': (_get_expense_from_line, ['unit_amount','unit_quantity'], 10)
            }),
        'currency_id': fields.many2one('res.currency', 'Currency', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
        'department_id':fields.many2one('hr.department','Department', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'state': fields.selection([
            ('draft', 'New'),
            ('cancelled', 'Refused'),
            ('confirm', 'Waiting Approval'),
            ('accepted', 'Approved'),
            ('done', 'Waiting Payment'),
            ('paid', 'Paid'),
            ],
            'Status', readonly=True, track_visibility='onchange', copy=False,
            help='When the expense request is created the status is \'Draft\'.\n It is confirmed by the user and request is sent to admin, the status is \'Waiting Confirmation\'.\
            \nIf the admin accepts it, the status is \'Accepted\'.\n If the accounting entries are made for the expense request, the status is \'Waiting Payment\'.'),
    }
    _defaults = {
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'hr.employee', context=c),
        'date': fields.date.context_today,
        'state': 'draft',
        'employee_id': _employee_get,
        # 'c' (the context) is unused; default it to None rather than a
        # shared mutable dict (mutable-default antipattern).
        'user_id': lambda cr, uid, id, c=None: id,
        'currency_id': _get_currency,
    }

    def unlink(self, cr, uid, ids, context=None):
        """Only draft expense sheets may be deleted."""
        for rec in self.browse(cr, uid, ids, context=context):
            if rec.state != 'draft':
                raise osv.except_osv(_('Warning!'),_('You can only delete draft expenses!'))
        return super(hr_expense_expense, self).unlink(cr, uid, ids, context)

    def onchange_currency_id(self, cr, uid, ids, currency_id=False, company_id=False, context=None):
        """Pick a purchase journal matching the selected currency and company."""
        res = {'value': {'journal_id': False}}
        journal_ids = self.pool.get('account.journal').search(cr, uid, [('type','=','purchase'), ('currency','=',currency_id), ('company_id', '=', company_id)], context=context)
        if journal_ids:
            res['value']['journal_id'] = journal_ids[0]
        return res

    def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
        """Propagate the employee's department and company onto the sheet."""
        emp_obj = self.pool.get('hr.employee')
        department_id = False
        company_id = False
        if employee_id:
            employee = emp_obj.browse(cr, uid, employee_id, context=context)
            department_id = employee.department_id.id
            company_id = employee.company_id.id
        return {'value': {'department_id': department_id, 'company_id': company_id}}

    def expense_confirm(self, cr, uid, ids, context=None):
        """Confirm the sheet and subscribe the employee's manager to the thread."""
        for expense in self.browse(cr, uid, ids):
            if expense.employee_id and expense.employee_id.parent_id.user_id:
                self.message_subscribe_users(cr, uid, [expense.id], user_ids=[expense.employee_id.parent_id.user_id.id])
        return self.write(cr, uid, ids, {'state': 'confirm', 'date_confirm': time.strftime('%Y-%m-%d')}, context=context)

    def expense_accept(self, cr, uid, ids, context=None):
        """Approve the sheet, recording the validation date and validator."""
        return self.write(cr, uid, ids, {'state': 'accepted', 'date_valid': time.strftime('%Y-%m-%d'), 'user_valid': uid}, context=context)

    def expense_canceled(self, cr, uid, ids, context=None):
        """Refuse the expense sheet."""
        return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)

    def account_move_get(self, cr, uid, expense_id, context=None):
        '''
        This method prepare the creation of the account move related to the given expense.

        :param expense_id: Id of expense for which we are creating account_move.
        :return: mapping between fieldname and value of account move to create
        :rtype: dict
        '''
        journal_obj = self.pool.get('account.journal')
        expense = self.browse(cr, uid, expense_id, context=context)
        company_id = expense.company_id.id
        date = expense.date_confirm
        ref = expense.name
        journal_id = False
        if expense.journal_id:
            journal_id = expense.journal_id.id
        else:
            journal_id = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', company_id)])
            if not journal_id:
                raise osv.except_osv(_('Error!'), _("No expense journal found. Please make sure you have a journal with type 'purchase' configured."))
            journal_id = journal_id[0]
        return self.pool.get('account.move').account_move_prepare(cr, uid, journal_id, date=date, ref=ref, company_id=company_id, context=context)

    def line_get_convert(self, cr, uid, x, part, date, context=None):
        """Convert an internal move-line dict into account.move.line values."""
        partner_id = self.pool.get('res.partner')._find_accounting_partner(part).id
        return {
            'date_maturity': x.get('date_maturity', False),
            'partner_id': partner_id,
            'name': x['name'][:64],
            'date': date,
            'debit': x['price']>0 and x['price'],
            'credit': x['price']<0 and -x['price'],
            'account_id': x['account_id'],
            'analytic_lines': x.get('analytic_lines', False),
            'amount_currency': x['price']>0 and abs(x.get('amount_currency', False)) or -abs(x.get('amount_currency', False)),
            'currency_id': x.get('currency_id', False),
            'tax_code_id': x.get('tax_code_id', False),
            'tax_amount': x.get('tax_amount', False),
            'ref': x.get('ref', False),
            'quantity': x.get('quantity',1.00),
            'product_id': x.get('product_id', False),
            'product_uom_id': x.get('uos_id', False),
            'analytic_account_id': x.get('account_analytic_id', False),
        }

    def compute_expense_totals(self, cr, uid, exp, company_currency, ref, account_move_lines, context=None):
        '''
        internal method used for computation of total amount of an expense in the company currency and
        in the expense currency, given the account_move_lines that will be created. It also do some small
        transformations at these account_move_lines (for multi-currency purposes)

        :param account_move_lines: list of dict
        :rtype: tuple of 3 elements (a, b ,c)
            a: total in company currency
            b: total in hr.expense currency
            c: account_move_lines potentially modified
        '''
        cur_obj = self.pool.get('res.currency')
        context = dict(context or {}, date=exp.date_confirm or time.strftime('%Y-%m-%d'))
        total = 0.0
        total_currency = 0.0
        for i in account_move_lines:
            if exp.currency_id.id != company_currency:
                i['currency_id'] = exp.currency_id.id
                i['amount_currency'] = i['price']
                i['price'] = cur_obj.compute(cr, uid, exp.currency_id.id,
                        company_currency, i['price'],
                        context=context)
            else:
                i['amount_currency'] = False
                i['currency_id'] = False
            total -= i['price']
            total_currency -= i['amount_currency'] or i['price']
        return total, total_currency, account_move_lines

    def action_move_create(self, cr, uid, ids, context=None):
        '''
        main function that is called when trying to create the accounting entries related to an expense
        '''
        move_obj = self.pool.get('account.move')
        for exp in self.browse(cr, uid, ids, context=context):
            if not exp.employee_id.address_home_id:
                raise osv.except_osv(_('Error!'), _('The employee must have a home address.'))
            if not exp.employee_id.address_home_id.property_account_payable.id:
                raise osv.except_osv(_('Error!'), _('The employee must have a payable account set on his home address.'))
            company_currency = exp.company_id.currency_id.id
            # '!=' replaces the deprecated '<>' operator (same semantics).
            diff_currency_p = exp.currency_id.id != company_currency
            #create the move that will contain the accounting entries
            move_id = move_obj.create(cr, uid, self.account_move_get(cr, uid, exp.id, context=context), context=context)
            #one account.move.line per expense line (+taxes..)
            eml = self.move_line_get(cr, uid, exp.id, context=context)
            #create one more move line, a counterline for the total on payable account
            total, total_currency, eml = self.compute_expense_totals(cr, uid, exp, company_currency, exp.name, eml, context=context)
            acc = exp.employee_id.address_home_id.property_account_payable.id
            eml.append({
                    'type': 'dest',
                    'name': '/',
                    'price': total,
                    'account_id': acc,
                    'date_maturity': exp.date_confirm,
                    'amount_currency': diff_currency_p and total_currency or False,
                    'currency_id': diff_currency_p and exp.currency_id.id or False,
                    'ref': exp.name
                    })
            #convert eml into an osv-valid format
            lines = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, exp.employee_id.address_home_id, exp.date_confirm, context=context)), eml)
            journal_id = move_obj.browse(cr, uid, move_id, context).journal_id
            # post the journal entry if 'Skip 'Draft' State for Manual Entries' is checked
            if journal_id.entry_posted:
                move_obj.button_validate(cr, uid, [move_id], context)
            move_obj.write(cr, uid, [move_id], {'line_id': lines}, context=context)
            self.write(cr, uid, ids, {'account_move_id': move_id, 'state': 'done'}, context=context)
        return True

    def move_line_get(self, cr, uid, expense_id, context=None):
        """Build the list of account.move.line dicts (sources + taxes) for an expense."""
        res = []
        tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')
        if context is None:
            context = {}
        exp = self.browse(cr, uid, expense_id, context=context)
        company_currency = exp.company_id.currency_id.id
        for line in exp.line_ids:
            mres = self.move_line_get_item(cr, uid, line, context)
            if not mres:
                continue
            res.append(mres)
            #Calculate tax according to default tax on product
            taxes = []
            #Taken from product_id_onchange in account.invoice
            if line.product_id:
                fposition_id = False
                fpos_obj = self.pool.get('account.fiscal.position')
                fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False
                product = line.product_id
                taxes = product.supplier_taxes_id
                #If taxes are not related to the product, maybe they are in the account
                if not taxes:
                    a = product.property_account_expense.id #Why is not there a check here?
                    if not a:
                        a = product.categ_id.property_account_expense_categ.id
                    a = fpos_obj.map_account(cr, uid, fpos, a)
                    taxes = a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False
            if not taxes:
                continue
            tax_l = []
            base_tax_amount = line.total_amount
            #Calculating tax on the line and creating move?
            for tax in tax_obj.compute_all(cr, uid, taxes,
                                           line.unit_amount ,
                                           line.unit_quantity, line.product_id,
                                           exp.user_id.partner_id)['taxes']:
                tax_code_id = tax['base_code_id']
                if not tax_code_id:
                    continue
                res[-1]['tax_code_id'] = tax_code_id
                ##
                is_price_include = tax_obj.read(cr,uid,tax['id'],['price_include'],context)['price_include']
                if is_price_include:
                    ## We need to deduce the price for the tax
                    res[-1]['price'] = res[-1]['price'] - tax['amount']
                    # tax amount countains base amount without the tax
                    base_tax_amount = (base_tax_amount - tax['amount']) * tax['base_sign']
                else:
                    base_tax_amount = base_tax_amount * tax['base_sign']
                assoc_tax = {
                             'type':'tax',
                             'name':tax['name'],
                             'price_unit': tax['price_unit'],
                             'quantity': 1,
                             'price': tax['amount'] or 0.0,
                             'account_id': tax['account_collected_id'] or mres['account_id'],
                             'tax_code_id': tax['tax_code_id'],
                             'tax_amount': tax['amount'] * tax['base_sign'],
                             }
                tax_l.append(assoc_tax)
            res[-1]['tax_amount'] = cur_obj.compute(cr, uid, exp.currency_id.id, company_currency, base_tax_amount, context={'date': exp.date_confirm})
            res += tax_l
        return res

    def move_line_get_item(self, cr, uid, line, context=None):
        """Build the 'src' move-line dict for a single expense line."""
        company = line.expense_id.company_id
        property_obj = self.pool.get('ir.property')
        if line.product_id:
            acc = line.product_id.property_account_expense
            if not acc:
                acc = line.product_id.categ_id.property_account_expense_categ
            if not acc:
                raise osv.except_osv(_('Error!'), _('No purchase account found for the product %s (or for his category), please configure one.') % (line.product_id.name))
        else:
            acc = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context={'force_company': company.id})
            if not acc:
                raise osv.except_osv(_('Error!'), _('Please configure Default Expense account for Product purchase: `property_account_expense_categ`.'))
        return {
            'type':'src',
            'name': line.name.split('\n')[0][:64],
            'price_unit':line.unit_amount,
            'quantity':line.unit_quantity,
            'price':line.total_amount,
            'account_id':acc.id,
            'product_id':line.product_id.id,
            'uos_id':line.uom_id.id,
            'account_analytic_id':line.analytic_account.id,
        }

    def action_view_move(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display existing account.move of given expense ids.
        '''
        assert len(ids) == 1, 'This option should only be used for a single id at a time'
        expense = self.browse(cr, uid, ids[0], context=context)
        assert expense.account_move_id
        try:
            dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'view_move_form')
        # The bound exception variable was unused; plain 'except' clause
        # also avoids the Python-2-only 'except E, e' syntax.
        except ValueError:
            view_id = False
        result = {
            'name': _('Expense Account Move'),
            'view_type': 'form',
            'view_mode': 'form',
            'view_id': view_id,
            'res_model': 'account.move',
            'type': 'ir.actions.act_window',
            'nodestroy': True,
            'target': 'current',
            'res_id': expense.account_move_id.id,
        }
        return result
class product_template(osv.osv):
    # Extend product.template with a flag marking products that may appear
    # on HR expense lines.
    _inherit = "product.template"

    _columns = {
        'hr_expense_ok': fields.boolean('Can be Expensed', help="Specify if the product can be selected in an HR expense line."),
    }
class hr_expense_line(osv.osv):
    _name = "hr.expense.line"
    _description = "Expense Line"
    def _amount(self, cr, uid, ids, field_name, arg, context=None):
        """Function-field helper: line total = unit_amount * unit_quantity.
        Computed with a single SQL query covering all requested ids; returns
        a dict mapping line id -> total amount.
        """
        if not ids:
            return {}
        cr.execute("SELECT l.id,COALESCE(SUM(l.unit_amount*l.unit_quantity),0) AS amount FROM hr_expense_line l WHERE id IN %s GROUP BY l.id ",(tuple(ids),))
        res = dict(cr.fetchall())
        return res
    def _get_uom_id(self, cr, uid, context=None):
        # Default Unit of Measure: the generic "Unit" record shipped with the
        # product module (False when the XML-id lookup fails).
        result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'product', 'product_uom_unit')
        return result and result[1] or False
    _columns = {
        'name': fields.char('Expense Note', required=True),
        'date_value': fields.date('Date', required=True),
        'expense_id': fields.many2one('hr.expense.expense', 'Expense', ondelete='cascade', select=True),
        'total_amount': fields.function(_amount, string='Total', digits_compute=dp.get_precision('Account')),
        'unit_amount': fields.float('Unit Price', digits_compute=dp.get_precision('Product Price')),
        'unit_quantity': fields.float('Quantities', digits_compute= dp.get_precision('Product Unit of Measure')),
        # Only products explicitly flagged "Can be Expensed" may be selected.
        'product_id': fields.many2one('product.product', 'Product', domain=[('hr_expense_ok','=',True)]),
        'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True),
        'description': fields.text('Description'),
        'analytic_account': fields.many2one('account.analytic.account','Analytic account'),
        'ref': fields.char('Reference'),
        'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of expense lines."),
    }
    _defaults = {
        'unit_quantity': 1,
        'date_value': lambda *a: time.strftime('%Y-%m-%d'),
        'uom_id': _get_uom_id,
    }
    _order = "sequence, date_value desc"
    def onchange_product_id(self, cr, uid, ids, product_id, context=None):
        """Prefill note, unit price (standard cost) and UoM from the product."""
        res = {}
        if product_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            res['name'] = product.name
            amount_unit = product.price_get('standard_price')[product.id]
            res['unit_amount'] = amount_unit
            res['uom_id'] = product.uom_id.id
        return {'value': res}
    def onchange_uom(self, cr, uid, ids, product_id, uom_id, context=None):
        """Warn and reset to the product's UoM when the selected UoM belongs
        to a different UoM category.
        """
        res = {'value':{}}
        if not uom_id or not product_id:
            return res
        product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        uom = self.pool.get('product.uom').browse(cr, uid, uom_id, context=context)
        if uom.category_id.id != product.uom_id.category_id.id:
            res['warning'] = {'title': _('Warning'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure')}
            res['value'].update({'uom_id': product.uom_id.id})
        return res
class account_move_line(osv.osv):
    _inherit = "account.move.line"

    def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
        """Reconcile the move lines, then resync the state of linked expenses.

        After a full reconciliation of the move lines `ids`, every expense in
        state 'done' whose payable move lines no longer carry any residual
        amount is flagged 'paid'.
        """
        res = super(account_move_line, self).reconcile(cr, uid, ids, type=type, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id, context=context)
        move_ids = [line.move_id.id for line in self.browse(cr, uid, ids, context=context)]
        if move_ids:
            expense_pool = self.pool.get('hr.expense.expense')
            currency_pool = self.pool.get('res.currency')
            expense_ids = expense_pool.search(cr, uid, [('account_move_id', 'in', move_ids)], context=context)
            for expense in expense_pool.browse(cr, uid, expense_ids, context=context):
                if expense.state != 'done':
                    continue
                # The expense counts as paid once every payable line of its
                # account move has a zero residual amount.
                fully_paid = all(
                    line.account_id.type != 'payable'
                    or currency_pool.is_zero(cr, uid, expense.company_id.currency_id, line.amount_residual)
                    for line in expense.account_move_id.line_id)
                if fully_paid:
                    expense_pool.write(cr, uid, [expense.id], {'state': 'paid'}, context=context)
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mdrumond/tensorflow | tensorflow/contrib/signal/python/ops/mfcc_ops.py | 18 | 4705 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mel-Frequency Cepstral Coefficients (MFCCs) ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
def mfccs_from_log_mel_spectrograms(log_mel_spectrograms, name=None):
  """Computes [MFCCs][mfcc] of `log_mel_spectrograms`.
  Implemented with GPU-compatible ops and supports gradients.
  [Mel-Frequency Cepstral Coefficient (MFCC)][mfcc] calculation consists of
  taking the DCT-II of a log-magnitude mel-scale spectrogram. [HTK][htk]'s MFCCs
  use a particular scaling of the DCT-II which is almost orthogonal
  normalization. We follow this convention.
  All `num_mel_bins` MFCCs are returned and it is up to the caller to select
  a subset of the MFCCs based on their application. For example, it is typical
  to only use the first few for speech recognition, as this results in
  an approximately pitch-invariant representation of the signal.
  For example:
  ```python
  sample_rate = 16000.0
  # A Tensor of [batch_size, num_samples] mono PCM samples in the range [-1, 1].
  pcm = tf.placeholder(tf.float32, [None, None])
  # A 1024-point STFT with frames of 64 ms and 75% overlap.
  stfts = tf.contrib.signal.stft(pcm, frame_length=1024, frame_step=256,
                                 fft_length=1024)
  spectrograms = tf.abs(stfts)
  # Warp the linear scale spectrograms into the mel-scale.
  num_spectrogram_bins = stfts.shape[-1].value
  lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80
  linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
    num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
    upper_edge_hertz)
  mel_spectrograms = tf.tensordot(
    spectrograms, linear_to_mel_weight_matrix, 1)
  mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(
    linear_to_mel_weight_matrix.shape[-1:]))
  # Compute a stabilized log to get log-magnitude mel-scale spectrograms.
  log_mel_spectrograms = tf.log(mel_spectrograms + 1e-6)
  # Compute MFCCs from log_mel_spectrograms and take the first 13.
  mfccs = tf.contrib.signal.mfccs_from_log_mel_spectrograms(
    log_mel_spectrograms)[..., :13]
  ```
  Args:
    log_mel_spectrograms: A `[..., num_mel_bins]` `float32` `Tensor` of
      log-magnitude mel-scale spectrograms.
    name: An optional name for the operation.
  Returns:
    A `[..., num_mel_bins]` `float32` `Tensor` of the MFCCs of
    `log_mel_spectrograms`.
  Raises:
    ValueError: If `num_mel_bins` is not positive.
  [mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
  [htk]: https://en.wikipedia.org/wiki/HTK_(software)
  """
  with ops.name_scope(name, 'mfccs_from_log_mel_spectrograms',
                      [log_mel_spectrograms]):
    # Compute the DCT-II of the resulting log-magnitude mel-scale spectrogram.
    # The DCT used in HTK scales every basis vector by sqrt(2/N), which is the
    # scaling required for an "orthogonal" DCT-II *except* in the 0th bin, where
    # the true orthogonal DCT (as implemented by scipy) scales by sqrt(1/N). For
    # this reason, we don't apply orthogonal normalization and scale the DCT by
    # `0.5 * sqrt(2/N)` manually.
    log_mel_spectrograms = ops.convert_to_tensor(log_mel_spectrograms,
                                                 dtype=dtypes.float32)
    if (log_mel_spectrograms.shape.ndims and
        log_mel_spectrograms.shape[-1].value is not None):
      num_mel_bins = log_mel_spectrograms.shape[-1].value
      # A statically-known dimension is never negative, so == 0 is the only
      # non-positive case to reject here.
      if num_mel_bins == 0:
        raise ValueError('num_mel_bins must be positive. Got: %s' %
                         log_mel_spectrograms)
    else:
      num_mel_bins = array_ops.shape(log_mel_spectrograms)[-1]
    dct2 = spectral_ops.dct(log_mel_spectrograms)
    return dct2 * math_ops.rsqrt(num_mel_bins * 2.0)
| apache-2.0 |
tpounds/ansible-modules-core | windows/win_lineinfile.py | 66 | 6297 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: win_lineinfile
author: "Brian Lloyd <brian.d.lloyd@gmail.com>"
short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression.
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in a file only.
version_added: "2.0"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The path of the file to modify.
regexp:
required: false
description:
- "The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions; see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx)."
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the line should be there or not.
line:
required: false
description:
- Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp) matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp) doesn't match anywhere in the file, the file will be left unchanged.
- If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is available; C(EOF) for inserting the line at the end of the file.
      - If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
description:
- Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available; C(BOF) for inserting the line at the beginning of the file.
      - If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
choices: [ "yes", "no" ]
default: "no"
description:
- Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
validate:
required: false
description:
- Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
- The command is passed securely so shell features like expansion and pipes won't work.
required: false
default: None
encoding:
required: false
default: "auto"
description:
- Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
- "An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method - see U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx)."
- This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a specific encoding, the default encoding (UTF-8, no BOM) will be used.
newline:
required: false
description:
      - "Specifies the line separator style to use for the modified file. This defaults to the windows line separator (\r\n). Note that the indicated line separator will be used for file output regardless of the original line separator that appears in the input file."
choices: [ "windows", "unix" ]
default: "windows"
"""
EXAMPLES = """
- win_lineinfile: dest=C:\\temp\\example.conf regexp=^name= line="name=JohnDoe"
- win_lineinfile: dest=C:\\temp\\example.conf state=absent regexp="^name="
- win_lineinfile: dest=C:\\temp\\example.conf regexp='^127\.0\.0\.1' line='127.0.0.1 localhost'
- win_lineinfile: dest=C:\\temp\\httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080"
- win_lineinfile: dest=C:\\temp\\services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default"
# Create file if it doesn't exist with a specific encoding
- win_lineinfile: dest=C:\\temp\\utf16.txt create="yes" encoding="utf-16" line="This is a utf-16 encoded file"
# Add a line to a file and ensure the resulting file uses unix line separators
- win_lineinfile: dest=C:\\temp\\testfile.txt line="Line added to file" newline="unix"
"""
| gpl-3.0 |
vishnu-kumar/PeformanceFramework | rally_os/common/costilius.py | 20 | 3888 | #
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
This module is a storage for different types of workarounds.
"""
from distutils import spawn
import os
import subprocess
import sys
from rally.common.i18n import _LE
from rally import exceptions
try:
from collections import OrderedDict # noqa
except ImportError:
# NOTE(andreykurilin): Python 2.6 issue. OrderedDict is not
# present in `collections` library.
from ordereddict import OrderedDict # noqa
def is_py26():
    """Return True iff the running interpreter is Python 2.6."""
    major, minor = sys.version_info[0], sys.version_info[1]
    return (major, minor) == (2, 6)
if is_py26():
import simplejson as json
else:
import json
def json_loads(*args, **kwargs):
    """Deserialize a JSON document (str or unicode) into Python objects.

    On Python 2.6 the module-level `json` name is bound to 'simplejson',
    because the stdlib 'json' of that release lacks several important
    features (e.g. 'object_pairs_hook', which allows deserializing the
    input into an OrderedDict).
    """
    return json.loads(*args, **kwargs)
def sp_check_output(*popenargs, **kwargs):
    """Run a command and return its output as a byte string.

    Raises subprocess.CalledProcessError (with the return code in
    `returncode` and the output in `output`) when the command exits with a
    non-zero status. Arguments mirror the subprocess.Popen constructor.
    """
    if not is_py26():
        return subprocess.check_output(*popenargs, **kwargs)
    # Python 2.6 has no subprocess.check_output; the code below is the
    # Python 2.7 stdlib implementation copied verbatim.
    if "stdout" in kwargs:
        raise ValueError("stdout argument not allowed, "
                         "it will be overridden.")
    proc = subprocess.Popen(stdout=subprocess.PIPE,
                            *popenargs, **kwargs)
    out, _unused_err = proc.communicate()
    returncode = proc.poll()
    if returncode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise subprocess.CalledProcessError(returncode, cmd, output=out)
    return out
def get_interpreter(python_version):
    """Discovers PATH to find proper python interpreter

    :param python_version: (major, minor) version numbers
    :type python_version: tuple
    :returns: path to a matching interpreter, or None (implicitly) when
        nothing on PATH reports the requested version
    :raises exceptions.InvalidArgumentsException: if `python_version` is
        not a tuple
    """
    if not isinstance(python_version, tuple):
        msg = (_LE("given format of python version `%s` is invalid") %
               python_version)
        raise exceptions.InvalidArgumentsException(msg)
    interpreter_name = "python%s.%s" % python_version
    interpreter = spawn.find_executable(interpreter_name)
    if interpreter:
        return interpreter
    else:
        # find_executable missed it: probe each PATH entry directly and ask
        # every candidate binary which version it actually is.
        interpreters = filter(
            os.path.isfile, [os.path.join(p, interpreter_name)
                             for p in os.environ.get("PATH", "").split(":")])
        cmd = "%s -c 'import sys; print(sys.version_info[:2])'"
        for interpreter in interpreters:
            try:
                out = sp_check_output(cmd % interpreter, shell=True)
            except subprocess.CalledProcessError:
                # Broken candidate binary: skip it and keep probing.
                pass
            else:
                # NOTE(review): on Python 3 `out` is bytes while
                # str(python_version) is str, so this comparison can never
                # succeed -- presumably only exercised under Python 2;
                # verify before relying on this fallback.
                if out.strip() == str(python_version):
                    return interpreter
| apache-2.0 |
nightjean/Deep-Learning | tensorflow/python/training/training.py | 21 | 9445 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for training models.
See the @{$python/train} guide.
@@Optimizer
@@GradientDescentOptimizer
@@AdadeltaOptimizer
@@AdagradOptimizer
@@AdagradDAOptimizer
@@MomentumOptimizer
@@AdamOptimizer
@@FtrlOptimizer
@@ProximalGradientDescentOptimizer
@@ProximalAdagradOptimizer
@@RMSPropOptimizer
@@gradients
@@AggregationMethod
@@stop_gradient
@@hessians
@@clip_by_value
@@clip_by_norm
@@clip_by_average_norm
@@clip_by_global_norm
@@global_norm
@@exponential_decay
@@inverse_time_decay
@@natural_exp_decay
@@piecewise_constant
@@polynomial_decay
@@ExponentialMovingAverage
@@Coordinator
@@QueueRunner
@@LooperThread
@@add_queue_runner
@@start_queue_runners
@@Server
@@Supervisor
@@SessionManager
@@ClusterSpec
@@replica_device_setter
@@MonitoredTrainingSession
@@MonitoredSession
@@SingularMonitoredSession
@@Scaffold
@@SessionCreator
@@ChiefSessionCreator
@@WorkerSessionCreator
@@summary_iterator
@@SessionRunHook
@@SessionRunArgs
@@SessionRunContext
@@SessionRunValues
@@LoggingTensorHook
@@StopAtStepHook
@@CheckpointSaverHook
@@CheckpointSaverListener
@@NewCheckpointReader
@@StepCounterHook
@@NanLossDuringTrainingError
@@NanTensorHook
@@SummarySaverHook
@@GlobalStepWaiterHook
@@FinalOpsHook
@@FeedFnHook
@@SecondOrStepTimer
@@global_step
@@basic_train_loop
@@get_global_step
@@get_or_create_global_step
@@create_global_step
@@assert_global_step
@@write_graph
"""
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
from tensorflow.python.ops import io_ops as _io_ops
from tensorflow.python.ops import sdca_ops as _sdca_ops
from tensorflow.python.ops import state_ops as _state_ops
from tensorflow.python.util.all_util import remove_undocumented
# pylint: disable=g-bad-import-order,unused-import
from tensorflow.python.ops.sdca_ops import sdca_optimizer
from tensorflow.python.ops.sdca_ops import sdca_fprint
from tensorflow.python.ops.sdca_ops import sdca_shrink_l1
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.adagrad_da import AdagradDAOptimizer
from tensorflow.python.training.proximal_adagrad import ProximalAdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.ftrl import FtrlOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.moving_averages import ExponentialMovingAverage
from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.proximal_gradient_descent import ProximalGradientDescentOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
from tensorflow.python.training.coordinator import LooperThread
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
from tensorflow.python.training.input import *
# pylint: enable=wildcard-import
from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
from tensorflow.python.training.basic_session_run_hooks import LoggingTensorHook
from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverListener
from tensorflow.python.training.basic_session_run_hooks import StepCounterHook
from tensorflow.python.training.basic_session_run_hooks import NanLossDuringTrainingError
from tensorflow.python.training.basic_session_run_hooks import NanTensorHook
from tensorflow.python.training.basic_session_run_hooks import SummarySaverHook
from tensorflow.python.training.basic_session_run_hooks import GlobalStepWaiterHook
from tensorflow.python.training.basic_session_run_hooks import FinalOpsHook
from tensorflow.python.training.basic_session_run_hooks import FeedFnHook
from tensorflow.python.training.basic_loops import basic_train_loop
from tensorflow.python.training.device_setter import replica_device_setter
from tensorflow.python.training.monitored_session import Scaffold
from tensorflow.python.training.monitored_session import MonitoredTrainingSession
from tensorflow.python.training.monitored_session import SessionCreator
from tensorflow.python.training.monitored_session import ChiefSessionCreator
from tensorflow.python.training.monitored_session import WorkerSessionCreator
from tensorflow.python.training.monitored_session import MonitoredSession
from tensorflow.python.training.monitored_session import SingularMonitoredSession
from tensorflow.python.training.saver import Saver
from tensorflow.python.training.saver import checkpoint_exists
from tensorflow.python.training.saver import generate_checkpoint_state_proto
from tensorflow.python.training.saver import get_checkpoint_mtimes
from tensorflow.python.training.saver import get_checkpoint_state
from tensorflow.python.training.saver import latest_checkpoint
from tensorflow.python.training.saver import update_checkpoint_state
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.training.saver import import_meta_graph
from tensorflow.python.training.session_run_hook import SessionRunHook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.session_run_hook import SessionRunContext
from tensorflow.python.training.session_run_hook import SessionRunValues
from tensorflow.python.training.session_manager import SessionManager
from tensorflow.python.training.summary_io import summary_iterator
from tensorflow.python.training.supervisor import Supervisor
from tensorflow.python.training.training_util import write_graph
from tensorflow.python.training.training_util import global_step
from tensorflow.python.training.training_util import get_global_step
from tensorflow.python.training.training_util import assert_global_step
from tensorflow.python.training.training_util import create_global_step
from tensorflow.python.training.training_util import get_or_create_global_step
from tensorflow.python.pywrap_tensorflow import do_quantize_training_on_graphdef
from tensorflow.python.pywrap_tensorflow import NewCheckpointReader
# pylint: disable=wildcard-import
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import *
# pylint: enable=wildcard-import
# Distributed computing support.
from tensorflow.core.protobuf.cluster_pb2 import ClusterDef
from tensorflow.core.protobuf.cluster_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.training.server_lib import Server
# Symbols whitelisted for export without documentation.
_allowed_symbols = [
    # TODO(cwhipkey): review these and move to contrib or expose through
    # documentation.
    "generate_checkpoint_state_proto",  # Used internally by saver.
    "checkpoint_exists",  # Only used in test?
    "get_checkpoint_mtimes",  # Only used in test?
    # Legacy: remove.
    "do_quantize_training_on_graphdef",  # At least use grah_def, not graphdef.
    # No uses within tensorflow.
    "queue_runner",  # Use tf.train.start_queue_runner etc directly.
    # This is also imported internally.
    # TODO(drpng): document these. The reference in howtos/distributed does
    # not link.
    "SyncReplicasOptimizer",
    # Protobufs:
    "BytesList",  # from example_pb2.
    "ClusterDef",
    "Example",  # from example_pb2
    "Feature",  # from example_pb2
    "Features",  # from example_pb2
    "FeatureList",  # from example_pb2
    "FeatureLists",  # from example_pb2
    "FloatList",  # from example_pb2.
    "Int64List",  # from example_pb2.
    "JobDef",
    "SaverDef",  # From saver_pb2.
    "SequenceExample",  # from example_pb2.
    "ServerDef",
]
# Include extra modules for docstrings because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
# remove_undocumented prunes from this module every public name that is
# neither documented in the listed modules nor whitelisted above.
remove_undocumented(__name__, _allowed_symbols,
                    [_sys.modules[__name__], _io_ops, _sdca_ops, _state_ops])
| apache-2.0 |
chrisvans/roastdoge | coffee/tests.py | 1 | 1705 | # Django
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.urlresolvers import reverse
from django.http import JsonResponse
from django.test import TestCase, Client, RequestFactory
# Ours
import factories
import models
import ajax
import forms
import views
# Third Party
from rest_framework.test import APIRequestFactory
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support import ui
# Python
import time
# TODO: Create functional tests for the listviews' (RoastProfile, Coffee) delete action
class TestAjaxViews(TestCase):
    """Exercise the REST endpoints that operate on Coffee objects."""

    def setUp(self):
        # A coffee with one generated roast profile is enough fixture for
        # every test in this class.
        self.coffee = factories.CoffeeFactory.create()
        self.coffee._generate_profile()
        self.roastprofile = self.coffee.roastprofile_set.all()[0]
        self.request_factory = RequestFactory()
        self.api_request_factory = APIRequestFactory()

    def tearDown(self):
        self.coffee.delete()

    def test_coffee_delete(self):
        """
        Test that this view properly deletes a Coffee based on its ID.
        """
        request = self.api_request_factory.delete(
            reverse(
                'rest-coffee-detail',
                args=(self.coffee.id,),
            )
        )
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual/assertTrue/assertFalse instead.
        self.assertTrue(models.Coffee.objects.filter(id=self.coffee.id).exists())
        view = views.CoffeeViewSet.as_view(actions={'delete': 'destroy'})
        response = view(request, pk=self.coffee.id)
        self.assertFalse(models.Coffee.objects.filter(id=self.coffee.id).exists())
        # Assert that some success code (2xx) was received.
        self.assertTrue(200 <= response.status_code < 300)
| mit |
ruibarreira/linuxtrail | usr/lib/python3.4/textwrap.py | 83 | 19104 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
import re
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 0 .. 'tabsize' spaces, depending on its position
in its line. If false, each tab is treated as a single character.
tabsize (default: 8)
Expand tabs in input text to 0 .. 'tabsize' spaces, unless
'expand_tabs' is false.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
max_lines (default: None)
Truncate wrapped lines.
placeholder (default: ' [...]')
Append to the last line of truncated text.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
    def __init__(self,
                 width=70,
                 initial_indent="",
                 subsequent_indent="",
                 expand_tabs=True,
                 replace_whitespace=True,
                 fix_sentence_endings=False,
                 break_long_words=True,
                 drop_whitespace=True,
                 break_on_hyphens=True,
                 tabsize=8,
                 *,
                 max_lines=None,
                 placeholder=' [...]'):
        """Store the wrapping options as instance attributes.

        See the class docstring for the meaning of each keyword.
        max_lines and placeholder are keyword-only.
        """
        self.width = width
        self.initial_indent = initial_indent
        self.subsequent_indent = subsequent_indent
        self.expand_tabs = expand_tabs
        self.replace_whitespace = replace_whitespace
        self.fix_sentence_endings = fix_sentence_endings
        self.break_long_words = break_long_words
        self.drop_whitespace = drop_whitespace
        self.break_on_hyphens = break_on_hyphens
        self.tabsize = tabsize
        self.max_lines = max_lines
        self.placeholder = placeholder
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
    def _munge_whitespace(self, text):
        """_munge_whitespace(text : string) -> string

        Munge whitespace in text: expand tabs and convert all other
        whitespace characters to spaces.  Eg. " foo\tbar\n\nbaz"
        becomes " foo bar baz".
        """
        # Tabs must be expanded before whitespace replacement so each tab
        # becomes a run of spaces rather than a single space.
        if self.expand_tabs:
            text = text.expandtabs(self.tabsize)
        if self.replace_whitespace:
            text = text.translate(self.unicode_whitespace_trans)
        return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        """_handle_long_word(chunks : [string],
                             cur_line : [string],
                             cur_len : int, width : int)

        Handle a chunk of text (most likely a word, not whitespace) that
        is too long to fit in any line.  reversed_chunks is a stack:
        the next chunk to place is at index -1.
        """
        # Figure out when indent is larger than the specified width, and make
        # sure at least one character is stripped off on every pass
        if width < 1:
            space_left = 1
        else:
            space_left = width - cur_len
        # If we're allowed to break long words, then do so: put as much
        # of the next chunk onto the current line as will fit.
        if self.break_long_words:
            cur_line.append(reversed_chunks[-1][:space_left])
            reversed_chunks[-1] = reversed_chunks[-1][space_left:]
        # Otherwise, we have to preserve the long word intact.  Only add
        # it to the current line if there's nothing already there --
        # that minimizes how much we violate the width constraint.
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())
        # If we're not allowed to break long words, and there's already
        # text on the current line, do nothing.  Next time through the
        # main loop of _wrap_chunks(), we'll wind up here again, but
        # cur_len will be zero, so the next line will be entirely
        # devoted to the long word that we can't handle right now.
    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]

        Wrap a sequence of text chunks and return a list of lines of
        length 'self.width' or less.  (If 'break_long_words' is false,
        some lines may be longer than this.)  Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible (modulo 'break_long_words'), but a line break can
        come between any two chunks.  Chunks should not have internal
        whitespace; ie. a chunk is either all whitespace or a "word".
        Whitespace chunks will be removed from the beginning and end of
        lines, but apart from that whitespace is preserved.
        """
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)
        if self.max_lines is not None:
            # The placeholder must be able to fit on the line it would be
            # appended to, otherwise truncation could never succeed.
            if self.max_lines > 1:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            if len(indent) + len(self.placeholder.lstrip()) > self.width:
                raise ValueError("placeholder too large for max width")
        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chucks.
        chunks.reverse()
        while chunks:
            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0
            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            # Maximum width for this line.
            width = self.width - len(indent)
            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (ie. no lines started yet).
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]
            while chunks:
                l = len(chunks[-1])
                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l
                # Nope, this line is full.
                else:
                    break
            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and len(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)
                cur_len = sum(map(len, cur_line))
            # If the last chunk on this line is all whitespace, drop it.
            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
                cur_len -= len(cur_line[-1])
                del cur_line[-1]
            if cur_line:
                # NOTE: 'and' binds tighter than 'or' below.  The line is
                # emitted as-is when there is no line limit, when there is
                # still room for another line, or when this is the final
                # content (possibly followed only by droppable whitespace)
                # *and* it fits; otherwise we truncate with the placeholder.
                if (self.max_lines is None or
                    len(lines) + 1 < self.max_lines or
                    (not chunks or
                     self.drop_whitespace and
                     len(chunks) == 1 and
                     not chunks[0].strip()) and cur_len <= width):
                    # Convert current line back to a string and store it in
                    # list of all lines (return value).
                    lines.append(indent + ''.join(cur_line))
                else:
                    # Truncating: drop trailing chunks until the placeholder
                    # fits after a non-whitespace chunk...
                    while cur_line:
                        if (cur_line[-1].strip() and
                            cur_len + len(self.placeholder) <= width):
                            cur_line.append(self.placeholder)
                            lines.append(indent + ''.join(cur_line))
                            break
                        cur_len -= len(cur_line[-1])
                        del cur_line[-1]
                    else:
                        # ...or, when nothing on this line survives, try
                        # appending the placeholder to the previous line.
                        if lines:
                            prev_line = lines[-1].rstrip()
                            if (len(prev_line) + len(self.placeholder) <=
                                    self.width):
                                lines[-1] = prev_line + self.placeholder
                                break
                        lines.append(indent + self.placeholder.lstrip())
                    break
        return lines
    def _split_chunks(self, text):
        # Normalise whitespace first, then chunk the result for wrapping.
        text = self._munge_whitespace(text)
        return self._split(text)
# -- Public interface ----------------------------------------------
    def wrap(self, text):
        """wrap(text : string) -> [string]

        Reformat the single paragraph in 'text' so it fits in lines of
        no more than 'self.width' columns, and return a list of wrapped
        lines.  Tabs in 'text' are expanded with string.expandtabs(),
        and all other whitespace characters (including newline) are
        converted to space.  Lines do not end with newlines.
        """
        chunks = self._split_chunks(text)
        # Optionally restore two-space sentence separators lost by the
        # whitespace munging above.
        if self.fix_sentence_endings:
            self._fix_sentence_endings(chunks)
        return self._wrap_chunks(chunks)
    def fill(self, text):
        """fill(text : string) -> string

        Reformat the single paragraph in 'text' to fit in lines of no
        more than 'self.width' columns, and return a new string
        containing the entire wrapped paragraph (lines joined with
        newlines, no trailing newline).
        """
        return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
    """Wrap a single paragraph of text, returning a list of wrapped lines.

    Reformat the single paragraph in 'text' so it fits in lines of no
    more than 'width' columns, and return a list of wrapped lines.  By
    default, tabs in 'text' are expanded with string.expandtabs(), and
    all other whitespace characters (including newline) are converted to
    space.  See TextWrapper class for available keyword args to customize
    wrapping behaviour.
    """
    # Delegate to a throwaway TextWrapper configured from the kwargs.
    return TextWrapper(width=width, **kwargs).wrap(text)
def fill(text, width=70, **kwargs):
    """Fill a single paragraph of text, returning a new string.

    Reformat the single paragraph in 'text' to fit in lines of no more
    than 'width' columns, and return a new string containing the entire
    wrapped paragraph.  As with wrap(), tabs are expanded and other
    whitespace characters converted to space.  See TextWrapper class for
    available keyword args to customize wrapping behaviour.
    """
    # Delegate to a throwaway TextWrapper configured from the kwargs.
    return TextWrapper(width=width, **kwargs).fill(text)
def shorten(text, width, **kwargs):
    """Collapse and truncate the given text to fit in the given width.

    The text first has its whitespace collapsed.  If it then fits in
    the *width*, it is returned as is.  Otherwise, as many words
    as possible are joined and then the placeholder is appended::

        >>> textwrap.shorten("Hello  world!", width=12)
        'Hello world!'
        >>> textwrap.shorten("Hello  world!", width=11)
        'Hello [...]'
    """
    # Collapse all runs of whitespace to single spaces before wrapping.
    collapsed = ' '.join(text.strip().split())
    wrapper = TextWrapper(width=width, max_lines=1, **kwargs)
    return wrapper.fill(collapsed)
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)


def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    This can be used to make triple-quoted strings line up with the left
    edge of the display, while still presenting them in the source code
    in indented form.

    Note that tabs and spaces are both treated as whitespace, but they
    are not equal: the lines "  hello" and "\thello" are
    considered to have no common leading whitespace.  (This behaviour is
    new in Python 2.5; older versions of this module incorrectly
    expanded tabs before searching for common leading whitespace.)
    """
    # Normalise whitespace-only lines to empty so they cannot shrink the
    # margin, then find the longest prefix of spaces/tabs shared by all
    # remaining lines.
    text = _whitespace_only_re.sub('', text)
    margin = None
    for indent in _leading_whitespace_re.findall(text):
        if margin is None:
            margin = indent
        elif indent.startswith(margin):
            # Deeper than the current winner: no change.
            continue
        elif margin.startswith(indent):
            # Shallower but consistent: it becomes the new winner.
            margin = indent
        else:
            # No common whitespace at all: there is no margin.
            margin = ""
            break
    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text
def indent(text, prefix, predicate=None):
    """Adds 'prefix' to the beginning of selected lines in 'text'.

    If 'predicate' is provided, 'prefix' will only be added to the lines
    where 'predicate(line)' is True.  If 'predicate' is not provided,
    it will default to adding 'prefix' to all non-empty lines that do not
    consist solely of whitespace characters.
    """
    if predicate is None:
        # Default: only prefix lines containing non-whitespace.
        predicate = lambda line: line.strip()
    pieces = []
    # splitlines(True) keeps the line endings, so joining reproduces the
    # original text exactly on unselected lines.
    for line in text.splitlines(True):
        pieces.append(prefix + line if predicate(line) else line)
    return ''.join(pieces)
# Quick manual smoke test when the module is run directly.
if __name__ == "__main__":
    #print dedent("\tfoo\n\tbar")
    #print dedent("  \thello there\n  \t  how are you?")
    print(dedent("Hello there.\n This is indented."))
| gpl-3.0 |
oscarolar/odoo | addons/hr_timesheet_invoice/__openerp__.py | 64 | 2474 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoice on Timesheets',
'version': '1.0',
'category': 'Sales Management',
'description': """
Generate your Invoices from Expenses, Timesheet Entries.
========================================================
Module to generate invoices based on costs (human resources, expenses, ...).
You can define price lists in analytic account, make some theoretical revenue
reports.""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/hr_bill_task_work.jpeg','images/hr_type_of_invoicing.jpeg'],
'depends': ['account', 'hr_timesheet', 'report'],
'data': [
'security/ir.model.access.csv',
'hr_timesheet_invoice_data.xml',
'hr_timesheet_invoice_view.xml',
'hr_timesheet_invoice_wizard.xml',
'hr_timesheet_invoice_report.xml',
'report/report_analytic_view.xml',
'report/hr_timesheet_invoice_report_view.xml',
'wizard/hr_timesheet_analytic_profit_view.xml',
'wizard/hr_timesheet_invoice_create_view.xml',
'wizard/hr_timesheet_invoice_create_final_view.xml',
'views/report_analyticprofit.xml',
],
'demo': ['hr_timesheet_invoice_demo.xml'],
'test': ['test/test_hr_timesheet_invoice.yml',
'test/test_hr_timesheet_invoice_no_prod_tax.yml',
'test/hr_timesheet_invoice_report.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
spyofchina/shadowsocks | shadowsocks/lru_cache.py | 983 | 4290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import collections
import logging
import time
# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes long pause
# MutableMapping moved to collections.abc in Python 3.3 and the
# collections-level alias was removed in Python 3.10; fall back for
# Python 2 where collections.abc does not exist.
try:
    from collections.abc import MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping


class LRUCache(MutableMapping):
    """A dict-like cache whose entries expire after ``timeout`` seconds
    of inactivity (visits refresh the clock; expiry happens in sweep()).

    This class is not thread safe.  It is optimized for a large number
    of concurrently stored keys (n) rather than raw QPS: get & set are
    O(1), sweep() is O(m) in the number of not-yet-expired visits.
    """

    def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
        # timeout: seconds of inactivity before a key becomes sweepable.
        # close_callback: called once per value just before eviction.
        self.timeout = timeout
        self.close_callback = close_callback
        self._store = {}
        self._time_to_keys = collections.defaultdict(list)
        self._keys_to_last_time = {}
        self._last_visits = collections.deque()
        self._closed_values = set()
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        # O(1).  Look the value up *first* so that a missing key raises
        # KeyError without recording a bogus visit (the previous version
        # updated the bookkeeping even for keys that were never stored).
        value = self._store[key]
        t = time.time()
        self._keys_to_last_time[key] = t
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)
        return value

    def __setitem__(self, key, value):
        # O(1)
        t = time.time()
        self._keys_to_last_time[key] = t
        self._store[key] = value
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)

    def __delitem__(self, key):
        # O(1).  Stale references left in _time_to_keys/_last_visits are
        # harmless: sweep() re-checks membership in _store before evicting.
        del self._store[key]
        del self._keys_to_last_time[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def sweep(self):
        """Evict every key whose last visit is older than ``timeout``."""
        # O(m)
        now = time.time()
        c = 0
        while len(self._last_visits) > 0:
            least = self._last_visits[0]
            if now - least <= self.timeout:
                # Visits are appended in time order, so the rest are newer.
                break
            if self.close_callback is not None:
                for key in self._time_to_keys[least]:
                    if key in self._store:
                        if now - self._keys_to_last_time[key] > self.timeout:
                            value = self._store[key]
                            # _closed_values prevents calling the callback
                            # twice for a value stored under several
                            # expired keys during this sweep.
                            if value not in self._closed_values:
                                self.close_callback(value)
                                self._closed_values.add(value)
            for key in self._time_to_keys[least]:
                self._last_visits.popleft()
                if key in self._store:
                    if now - self._keys_to_last_time[key] > self.timeout:
                        del self._store[key]
                        del self._keys_to_last_time[key]
                        c += 1
            del self._time_to_keys[least]
        if c:
            self._closed_values.clear()
            logging.debug('%d keys swept' % c)
def test():
    """Self-test: expiry, refresh-on-access, and the close callback."""
    cache = LRUCache(timeout=0.3)
    cache['a'] = 1
    assert cache['a'] == 1

    # After the timeout, 'a' must be swept away.
    time.sleep(0.5)
    cache.sweep()
    assert 'a' not in cache

    cache['a'] = 2
    cache['b'] = 3
    time.sleep(0.2)
    cache.sweep()
    assert cache['a'] == 2
    assert cache['b'] == 3

    time.sleep(0.2)
    cache.sweep()
    cache['b']  # touch 'b' so only 'a' expires on the next sweep
    time.sleep(0.2)
    cache.sweep()
    assert 'a' not in cache
    assert cache['b'] == 3

    time.sleep(0.5)
    cache.sweep()
    assert 'a' not in cache
    assert 'b' not in cache

    # The close callback must fire exactly once per evicted value.
    global close_cb_called
    close_cb_called = False

    def close_cb(t):
        global close_cb_called
        assert not close_cb_called
        close_cb_called = True

    cache = LRUCache(timeout=0.1, close_callback=close_cb)
    cache['s'] = 1
    cache['s']
    time.sleep(0.1)
    cache['s']
    time.sleep(0.3)
    cache.sweep()
# Run the self-test when executed directly.
if __name__ == '__main__':
    test()
| apache-2.0 |
marcoantoniooliveira/labweb | oscar/views/decorators.py | 1 | 4985 | from functools import wraps
from six.moves.urllib import parse
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
def staff_member_required(view_func, login_url=None):
    """
    Ensure that the user is a logged-in staff member.

    * If not authenticated, redirect to a specified login URL.
    * If not staff, show a 403 page

    This decorator is based on the decorator with the same name from
    django.contrib.admin.views.decorators. This one is superior as it allows a
    redirect URL to be specified.
    """
    if login_url is None:
        login_url = reverse_lazy('customer:login')

    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        user = request.user
        if user.is_active and user.is_staff:
            return view_func(request, *args, **kwargs)
        if user.is_authenticated():
            # Logged in, but not staff: permission denied (403).
            raise PermissionDenied
        # Anonymous user: redirect to the login page.  When the login URL
        # shares this request's scheme and host, a relative "next" path is
        # enough; otherwise keep the absolute URI.
        path = request.build_absolute_uri()
        login_scheme, login_netloc = parse.urlparse(login_url)[:2]
        current_scheme, current_netloc = parse.urlparse(path)[:2]
        if ((not login_scheme or login_scheme == current_scheme) and
                (not login_netloc or login_netloc == current_netloc)):
            path = request.get_full_path()
        messages.warning(request, _("You must log in to access this page"))
        return redirect_to_login(path, login_url, REDIRECT_FIELD_NAME)

    return _checklogin
def check_permissions(user, permissions):
    """
    Check whether *user* satisfies *permissions*.

    Permissions can be a list or a tuple of lists. If it is a tuple,
    every permission list will be evaluated and the outcome will be checked
    for truthiness.
    Each item of the list(s) must be either a valid Django permission name
    (model.codename) or a property or method on the User model
    (e.g. 'is_active', 'is_superuser').

    Example usage:
    - permissions_required(['is_staff', ])
      would replace staff_member_required
    - permissions_required(['is_anonymous', ])
      would replace login_forbidden
    - permissions_required((['is_staff',], ['partner.dashboard_access']))
      allows both staff users and users with the above permission
    """
    def _check_one_permission_list(perms):
        # Django permissions contain a dot; everything else is treated as
        # an attribute/method lookup on the user.
        regular_permissions = [perm for perm in perms if '.' in perm]
        conditions = [perm for perm in perms if '.' not in perm]
        # always check for is_active if not checking for is_anonymous
        if (conditions and
                'is_anonymous' not in conditions and
                'is_active' not in conditions):
            conditions.append('is_active')
        for condition in conditions:
            attr = getattr(user, condition)
            # Methods are evaluated; plain attributes are used as booleans.
            if not (attr() if callable(attr) else bool(attr)):
                return False
        return user.has_perms(regular_permissions)

    if not permissions:
        return True
    if isinstance(permissions, list):
        return _check_one_permission_list(permissions)
    # A tuple of lists: any one list passing is enough.
    return any(_check_one_permission_list(perm) for perm in permissions)
def permissions_required(permissions, login_url=None):
    """
    Decorator that checks if a user has the given permissions.
    Accepts a list or tuple of lists of permissions (see check_permissions
    documentation).

    An anonymous user failing the check is redirected to the login page;
    an authenticated user gets an HTTP 403 Permission Denied, analogous
    to Django's permission_required decorator.
    """
    if login_url is None:
        login_url = reverse_lazy('customer:login')

    def _check_permissions(user):
        allowed = check_permissions(user, permissions)
        # Redirecting a logged-in user to the login page would be useless;
        # give them a 403 instead.
        if not allowed and user.is_authenticated():
            raise PermissionDenied
        return allowed

    return user_passes_test(_check_permissions, login_url=login_url)
def login_forbidden(view_func, template_name='login_forbidden.html',
                    status=403):
    """
    Only allow anonymous users to access this view.
    """
    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        # Authenticated users are shown the "forbidden" template instead.
        if request.user.is_authenticated():
            return render(request, template_name, status=status)
        return view_func(request, *args, **kwargs)

    return _checklogin
| bsd-3-clause |
xingyepei/edx-platform | cms/djangoapps/contentstore/utils.py | 48 | 16825 | """
Common utility functions useful throughout the contentstore
"""
import logging
from opaque_keys import InvalidKeyError
import re
from datetime import datetime
from pytz import UTC
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django_comment_common.models import assign_default_role
from django_comment_common.utils import seed_permissions_roles
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from opaque_keys.edx.keys import UsageKey, CourseKey
from student.roles import CourseInstructorRole, CourseStaffRole
from student.models import CourseEnrollment
from student import auth
log = logging.getLogger(__name__)
def add_instructor(course_key, requesting_user, new_instructor):
    """
    Adds given user as instructor and staff to the given course,
    after verifying that the requesting_user has permission to do so.

    Arguments:
        course_key (CourseKey): the course to modify.
        requesting_user: the user performing the change; auth.add_users
            verifies their permission for the staff role.
        new_instructor: the user being granted both roles.
    """
    # can't use auth.add_users here b/c it requires user to already have Instructor perms in this course
    CourseInstructorRole(course_key).add_users(new_instructor)
    auth.add_users(requesting_user, CourseStaffRole(course_key), new_instructor)
def initialize_permissions(course_key, user_who_created_course):
    """
    Initializes a new course by enrolling the course creator as a student,
    and initializing Forum by seeding its permissions and assigning default roles.

    Arguments:
        course_key (CourseKey): the newly created course.
        user_who_created_course: the course creator to enroll and seed.
    """
    # seed the forums
    seed_permissions_roles(course_key)
    # auto-enroll the course creator in the course so that "View Live" will work.
    CourseEnrollment.enroll(user_who_created_course, course_key)
    # set default forum roles (assign 'Student' role)
    assign_default_role(course_key, user_who_created_course)
def remove_all_instructors(course_key):
    """
    Remove every user from the staff and instructor roles of the course.
    """
    # Strip membership from both course-level auth roles (staff first,
    # then instructor, matching the original ordering).
    for role_factory in (CourseStaffRole, CourseInstructorRole):
        role = role_factory(course_key)
        role.remove_users(*role.users_with_role())
def delete_course_and_groups(course_key, user_id):
    """
    This deletes the courseware associated with a course_key as well as cleaning update_item
    the various user table stuff (groups, permissions, etc.)

    Arguments:
        course_key (CourseKey): the course to delete.
        user_id: id of the user performing the deletion (recorded by the
            modulestore).
    """
    module_store = modulestore()
    with module_store.bulk_operations(course_key):
        module_store.delete_course(course_key, user_id)

        # Use the module logger instead of a bare ``print`` so the message
        # is captured by the configured logging, consistent with the rest
        # of this module.
        log.info('removing User permissions from course....')
        # in the django layer, we need to remove all the user permissions groups associated with this course
        try:
            remove_all_instructors(course_key)
        except Exception as err:  # pylint: disable=broad-except
            # Best-effort cleanup: a failure here must not abort the deletion.
            log.error("Error in deleting course groups for {0}: {1}".format(course_key, err))
def get_lms_link_for_item(location, preview=False):
    """
    Return a protocol-relative LMS URL that jumps to *location*.

    :param location: the UsageKey to jump to
    :param preview: True if the preview version of LMS should be returned. Default value is false.
    """
    assert isinstance(location, UsageKey)

    # No LMS configured at all: there is nothing sensible to link to.
    if settings.LMS_BASE is None:
        return None

    lms_base = settings.FEATURES.get('PREVIEW_LMS_BASE') if preview else settings.LMS_BASE

    return u"//{lms_base}/courses/{course_key}/jump_to/{location}".format(
        lms_base=lms_base,
        course_key=location.course_key.to_deprecated_string(),
        location=location.to_deprecated_string(),
    )
def get_lms_link_for_about_page(course_key):
    """
    Return the URL of the course about page, or None when no base URL can
    be determined.
    """
    assert isinstance(course_key, CourseKey)

    if settings.FEATURES.get('ENABLE_MKTG_SITE', False):
        # Marketing site enabled: derive the base from MKTG_URLS['ROOT'].
        if not hasattr(settings, 'MKTG_URLS'):
            log.exception("ENABLE_MKTG_SITE is True, but MKTG_URLS is not defined.")
            return None

        about_base = settings.MKTG_URLS.get('ROOT', None)
        if about_base is None:
            log.exception('There is no ROOT defined in MKTG_URLS')
            return None

        # Root will be "https://www.edx.org".  The complete URL will still
        # not be exactly correct, but redirects exist from www.edx.org to
        # get to the Drupal course about page URL.  Strip the scheme so the
        # value is formatted like LMS_BASE.
        about_base = re.sub(r"^https?://", "", about_base)
    elif settings.LMS_BASE is not None:
        about_base = settings.LMS_BASE
    else:
        return None

    return u"//{about_base_url}/courses/{course_key}/about".format(
        about_base_url=about_base,
        course_key=course_key.to_deprecated_string()
    )
# pylint: disable=invalid-name
def get_lms_link_for_certificate_web_view(user_id, course_key, mode):
    """
    Return the URL of the certificate web view for the given user, course
    and certificate mode, or None when no LMS base is configured.
    """
    assert isinstance(course_key, CourseKey)

    if settings.LMS_BASE is None:
        return None

    template = u"//{certificate_web_base}/certificates/user/{user_id}/course/{course_id}?preview={mode}"
    return template.format(
        certificate_web_base=settings.LMS_BASE,
        user_id=user_id,
        course_id=unicode(course_key),
        mode=mode
    )
def course_image_url(course):
    """Return the URL path of the course image, or '' if the key is invalid."""
    try:
        asset_key = StaticContent.compute_location(course.location.course_key, course.course_image)
    except InvalidKeyError:
        # A malformed course_image value cannot be turned into a URL.
        return ''
    return StaticContent.serialize_asset_key_with_slash(asset_key)
# pylint: disable=invalid-name
def is_currently_visible_to_students(xblock):
    """
    Return True when a published version of the xblock is currently visible
    to students: it exists, is not staff-only, and its release date (if any)
    has passed.
    """
    try:
        published = modulestore().get_item(
            xblock.location, revision=ModuleStoreEnum.RevisionOption.published_only
        )
    except ItemNotFoundError:
        # No published version, so clearly not visible.
        return False

    # Staff-only blocks are hidden from students regardless of start date.
    if published.visible_to_staff_only:
        return False

    # 'detached' blocks ignore release dates entirely.
    if 'detached' not in published._class_tags and published.start is not None:
        return datetime.now(UTC) > published.start

    # No start date, so it's always visible.
    return True
def has_children_visible_to_specific_content_groups(xblock):
    """
    Return True when any direct child of this xblock is limited to specific
    content groups.  Not recursive: grandchildren are not inspected.
    """
    if not xblock.has_children:
        return False
    return any(
        is_visible_to_specific_content_groups(child)
        for child in xblock.get_children()
    )
def is_visible_to_specific_content_groups(xblock):
    """
    Return True when this xblock's visibility is limited to specific
    content groups.
    """
    # Without any group_access settings the block is visible to everyone.
    if not xblock.group_access:
        return False
    return any(
        any(group["selected"] for group in partition["groups"])
        for partition in get_user_partition_info(xblock)
    )
def find_release_date_source(xblock):
    """
    Return the ancestor of *xblock* that set its release date (possibly
    the block itself).
    """
    current = xblock
    # Walk up the tree until we hit a section, an orphan, or an ancestor
    # with a different start date -- each of those sets its own date.
    while current.category != 'chapter':
        parent_location = modulestore().get_parent_location(
            current.location, revision=ModuleStoreEnum.RevisionOption.draft_preferred
        )
        # Orphaned xblocks set their own release date.
        if not parent_location:
            return current
        parent = modulestore().get_item(parent_location)
        if parent.start != current.start:
            return current
        current = parent
    return current
def find_staff_lock_source(xblock):
    """
    Return the xblock responsible for setting this xblock's staff lock, or
    None if the xblock is not staff locked.  If this xblock is explicitly
    locked it is returned itself; otherwise its ancestors are searched.
    """
    current = xblock
    while True:
        # An explicitly set lock on the current block wins.
        if current.fields['visible_to_staff_only'].is_set_on(current):
            return current
        # Stop searching at the section level.
        if current.category == 'chapter':
            return None
        parent_location = modulestore().get_parent_location(
            current.location, revision=ModuleStoreEnum.RevisionOption.draft_preferred
        )
        # Orphaned xblocks set their own staff lock.
        if not parent_location:
            return None
        current = modulestore().get_item(parent_location)
def ancestor_has_staff_lock(xblock, parent_xblock=None):
    """
    Return True iff one of xblock's ancestors has staff lock.
    Passing parent_xblock avoids a modulestore query.
    """
    if parent_xblock is None:
        parent_location = modulestore().get_parent_location(
            xblock.location, revision=ModuleStoreEnum.RevisionOption.draft_preferred
        )
        if not parent_location:
            # Orphans have no ancestors and therefore no inherited lock.
            return False
        parent_xblock = modulestore().get_item(parent_location)
    # visible_to_staff_only is inherited, so the parent reflects the whole chain.
    return parent_xblock.visible_to_staff_only
def reverse_url(handler_name, key_name=None, key_value=None, kwargs=None):
    """
    Creates the URL for the given handler.
    The optional key_name and key_value are passed in as kwargs to the handler,
    merged with any additional `kwargs`.
    """
    # Start from an empty dict rather than None: previously, calling this
    # with kwargs but without key_name crashed with AttributeError on
    # None.update().
    kwargs_for_reverse = {key_name: unicode(key_value)} if key_name else {}
    if kwargs:
        kwargs_for_reverse.update(kwargs)
    # Pass None (reverse()'s default) when there are no kwargs at all.
    return reverse('contentstore.views.' + handler_name,
                   kwargs=kwargs_for_reverse or None)
def reverse_course_url(handler_name, course_key, kwargs=None):
    """
    Creates the URL for handlers that use course_keys as URL parameters.

    Arguments:
        handler_name (str): view name under contentstore.views.
        course_key (CourseKey): passed as the 'course_key_string' kwarg.
        kwargs (dict): optional extra kwargs for the URL pattern.
    """
    return reverse_url(handler_name, 'course_key_string', course_key, kwargs)
def reverse_library_url(handler_name, library_key, kwargs=None):
    """
    Creates the URL for handlers that use library_keys as URL parameters.

    Arguments:
        handler_name (str): view name under contentstore.views.
        library_key: passed as the 'library_key_string' kwarg.
        kwargs (dict): optional extra kwargs for the URL pattern.
    """
    return reverse_url(handler_name, 'library_key_string', library_key, kwargs)
def reverse_usage_url(handler_name, usage_key, kwargs=None):
    """
    Creates the URL for handlers that use usage_keys as URL parameters.

    Arguments:
        handler_name (str): view name under contentstore.views.
        usage_key (UsageKey): passed as the 'usage_key_string' kwarg.
        kwargs (dict): optional extra kwargs for the URL pattern.
    """
    return reverse_url(handler_name, 'usage_key_string', usage_key, kwargs)
def has_active_web_certificate(course):
    """
    Return True if the given course has an active web certificate
    configuration, False if it has none, and None when the HTML
    certificate view is disabled (either the `CERTIFICATES_HTML_VIEW`
    platform feature or the course's `cert_html_view_enabled` flag).
    """
    if not (settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False)
            and course.cert_html_view_enabled):
        return None
    configurations = getattr(course, 'certificates', {}).get('certificates', [])
    return any(config.get('is_active') for config in configurations)
def get_user_partition_info(xblock, schemes=None, course=None):
    """
    Retrieve user partition information for an XBlock for display in editors.

    * If a partition has been disabled, it will be excluded from the results.
    * If a group within a partition is referenced by the XBlock, but the group
      has been deleted, the group will be marked as deleted in the results.

    Arguments:
        xblock (XBlock): The courseware component being edited.

    Keyword Arguments:
        schemes (iterable of str): If provided, filter partitions to include
            only schemes with the provided names.
        course (XBlock): The course descriptor.  If provided, uses this to look
            up the user partitions instead of loading the course.  This is
            useful if we're calling this function multiple times for the same
            course and want to minimize queries to the modulestore.

    Returns: list of dicts, each with keys "id", "name", "scheme" and
        "groups", where each group dict has keys "id", "name", "selected"
        and "deleted".  For example:

        >>> get_user_partition_info(block, schemes=["cohort"])
        [{"id": 12345, "name": "Cohorts", "scheme": "cohort",
          "groups": [{"id": 7890, "name": "Foo",
                      "selected": True, "deleted": False}]}]
    """
    course = course or modulestore().get_course(xblock.location.course_key)

    if course is None:
        log.warning(
            "Could not find course %s to retrieve user partition information",
            xblock.location.course_key
        )
        return []

    if schemes is not None:
        schemes = set(schemes)

    partitions = []
    for p in sorted(course.user_partitions, key=lambda p: p.name):
        # Exclude disabled partitions and partitions with no groups defined.
        # Also filter by scheme name if there's a filter defined.
        if not (p.active and p.groups and (schemes is None or p.scheme.name in schemes)):
            continue

        # Groups the XBlock references within this partition.  Falsey group
        # access for a partition means that all groups are selected; in the
        # UI, though, we don't show the particular groups selected, since
        # there's a separate option for "all users".  This set is invariant
        # across the partition's groups, so compute it once (the previous
        # implementation rebuilt it for every group).
        selected_groups = set(xblock.group_access.get(p.id, []) or [])

        # First, add groups defined by the partition.
        groups = [
            {
                "id": g.id,
                "name": g.name,
                "selected": g.id in selected_groups,
                "deleted": False,
            }
            for g in p.groups
        ]

        # Next, add any groups set on the XBlock that have been deleted.
        all_groups = set(g.id for g in p.groups)
        for gid in selected_groups - all_groups:
            groups.append({
                "id": gid,
                "name": _("Deleted group"),
                "selected": True,
                "deleted": True,
            })

        # Put together the entire partition dictionary.
        partitions.append({
            "id": p.id,
            "name": p.name,
            "scheme": p.scheme.name,
            "groups": groups,
        })

    return partitions
def get_visibility_partition_info(xblock):
    """
    Retrieve user partition information for the component visibility editor.

    This pre-processes partition information to simplify the template.

    Arguments:
        xblock (XBlock): The component being edited.

    Returns: dict
    """
    user_partitions = get_user_partition_info(xblock, schemes=["verification", "cohort"])
    partitions_by_scheme = {"cohort": [], "verification": []}
    has_selected_groups = False
    selected_verified_partition_id = None

    # Pre-process the partitions to make the UI easier to render.
    for partition in user_partitions:
        any_selected = any(group["selected"] for group in partition["groups"])
        if any_selected:
            has_selected_groups = True

        scheme_name = partition["scheme"]
        if scheme_name in partitions_by_scheme:
            partitions_by_scheme[scheme_name].append(partition)
        if scheme_name == "verification" and any_selected:
            selected_verified_partition_id = partition["id"]

    return {
        "user_partitions": user_partitions,
        "cohort_partitions": partitions_by_scheme["cohort"],
        "verification_partitions": partitions_by_scheme["verification"],
        "has_selected_groups": has_selected_groups,
        "selected_verified_partition_id": selected_verified_partition_id,
    }
| agpl-3.0 |
arbrandes/edx-platform | openedx/core/djangoapps/programs/migrations/0013_customprogramsconfig.py | 4 | 1270 | # Generated by Django 1.11.26 on 2019-12-13 07:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the CustomProgramsConfig
    # model, whose `arguments` text field stages CLI arguments for the
    # backpopulate_program_credentials job (see verbose_name below).
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('programs', '0012_auto_20170419_0018'),
    ]
    operations = [
        migrations.CreateModel(
            name='CustomProgramsConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Audit fields: when the row changed and who changed it.
                ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
                ('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
                ('arguments', models.TextField(blank=True, default='', help_text='Useful for manually running a Jenkins job. Specify like "--usernames A B --program-uuids X Y".')),
                ('changed_by', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Changed by')),
            ],
            options={
                'verbose_name': 'backpopulate_program_credentials argument',
            },
        ),
    ]
| agpl-3.0 |
jamesward-demo/air-quick-fix | AIRQuickFixServer/pyamf/adapters/_google_appengine_ext_db.py | 1 | 2171 | # Copyright (c) 2007-2008 The PyAMF Project.
# See LICENSE for details.
"""
Google App Engine adapter module.
Sets up basic type mapping and class mappings for using the Datastore API
in Google App Engine.
@see: U{Datastore API on Google App Engine (external)
<http://code.google.com/appengine/docs/datastore>}
@author: U{Nick Joyce<mailto:nick@boxdesign.co.uk>}
@since: 0.3.1
"""
from google.appengine.ext import db
import pyamf
from pyamf import amf0, amf3
def writeObjectAMF0(self, obj, *args, **kwargs):
    """
    Write `obj` with the AMF0 encoder, temporarily installing an anonymous
    L{pyamf.ClassAlias} when the object's class has no registered alias.
    """
    klass = obj.__class__
    alias = self.context.getClassAlias(klass)
    added_temp_alias = alias is None
    if added_temp_alias:
        self.context.class_aliases[klass] = pyamf.ClassAlias(klass, None)
    self.writeObject(obj, *args, **kwargs)
    if added_temp_alias:
        # Drop the temporary alias again so later lookups start fresh.
        self.context.class_aliases[klass] = None
def writeObjectAMF3(self, obj, *args, **kwargs):
    """
    Write `obj` with the AMF3 encoder, temporarily installing an anonymous
    L{pyamf.ClassAlias} when the object's class has no registered alias.

    If a class definition reference already exists for `obj`, nothing
    needs to be written here.
    """
    try:
        self.context.getClassDefinitionReference(obj)
    except pyamf.ReferenceError:
        # No class definition yet: make sure an alias exists before
        # encoding.  (The unused `class_def` local from the original
        # implementation has been removed.)
        alias = self.context.getClassAlias(obj.__class__)
        remove = False
        if alias is None:
            remove = True
            alias = pyamf.ClassAlias(obj.__class__, None)
            self.context.class_aliases[obj.__class__] = alias
        self.writeObject(obj, *args, **kwargs)
        if remove:
            # Drop the temporary alias again so later lookups start fresh.
            self.context.class_aliases[obj.__class__] = None
def get_attrs_for_model(obj):
    """
    Returns a list of properties on a C{db.Model} instance.
    """
    model_class = obj.__class__
    return list(model_class._properties)
def get_attrs_for_expando(obj):
    """
    Returns a list of dynamic properties on a C{db.Expando} instance.
    """
    # Expando tracks ad-hoc attributes itself; just ask it.
    return obj.dynamic_properties()
# Register the Datastore base classes with PyAMF, using the attribute
# lookup helpers above ('dynamic' marks the attribute list as per-instance).
pyamf.register_class(db.Model, attr_func=get_attrs_for_model, metadata=['dynamic'])
pyamf.register_class(db.Expando, attr_func=get_attrs_for_expando, metadata=['dynamic'])
# Patch the custom writers onto both encoders, inserting the type mapping
# just before the encoders' final (catch-all) entry so it takes precedence.
amf0.Encoder.writeGoogleModel = writeObjectAMF0
amf0.Encoder.type_map.insert(len(amf0.Encoder.type_map) - 1, ((db.Model,db.Expando), "writeGoogleModel"))
amf3.Encoder.writeGoogleModel = writeObjectAMF3
amf3.Encoder.type_map.insert(len(amf3.Encoder.type_map) - 1, ((db.Model,db.Expando), "writeGoogleModel"))
| apache-2.0 |
NamelessRom/android_kernel_samsung_latona | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n"

# Optional single argument: either a pid (numeric) or a comm name.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
	sys.exit(usage)

if len(sys.argv) > 1:
	try:
		for_pid = int(sys.argv[1])
	except ValueError:
		# Not numeric, so treat the argument as a comm name.
		# (Narrowed from a bare `except:`, which also swallowed
		# KeyboardInterrupt/SystemExit.)
		for_comm = sys.argv[1]

# syscalls[comm][pid][syscall_id] -> count
syscalls = autodict()
def trace_begin():
	# Called by perf once before any events are processed.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called by perf after the last event; dump the accumulated counts.
	print_syscall_totals()
# Handler invoked by perf for every raw_syscalls:sys_enter event.
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Skip events not matching the optional comm/pid filter.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	try:
		syscalls[common_comm][common_pid][id] += 1
	except TypeError:
		# autodict leaves an unset leaf; first hit initializes the counter.
		syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Print per-comm/pid syscall counts, most frequent first.
    # (Python 2 only: print statements and the tuple-unpacking
    # `lambda(k, v)` sort key are not valid Python 3.)
    if for_comm is not None:
	    print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
	    print "\nsyscall events by comm/pid:\n\n",

    print "%-40s  %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
	    pid_keys = syscalls[comm].keys()
	    for pid in pid_keys:
		    print "\n%s [%d]\n" % (comm, pid),
		    id_keys = syscalls[comm][pid].keys()
		    # Sort by count descending (py2-only lambda signature).
		    for id, val in sorted(syscalls[comm][pid].iteritems(), \
				  key = lambda(k, v): (v, k),  reverse = True):
			    print "  %-38s  %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
jaloren/robotframework | atest/interpreter.py | 4 | 5347 | from os.path import abspath, dirname, join, normpath
import os
import subprocess
import sys
def get_variables(path, name=None, version=None):
    """Robot Framework variable-file entry point: expose the interpreter."""
    interpreter = InterpreterFactory(path, name, version)
    return {'INTERPRETER': interpreter}
def InterpreterFactory(path, name=None, version=None):
    """Build a StandaloneInterpreter for jar paths, a plain Interpreter otherwise."""
    cls = StandaloneInterpreter if path.endswith('.jar') else Interpreter
    return cls(path, name, version)
class Interpreter(object):
    """A Python-family interpreter used to run the acceptance tests.

    Discovers the interpreter's name and version by executing it with
    ``-V``, and exposes the command lines for Robot Framework's entry
    points (run, rebot, libdoc, testdoc, tidy) as well as the test tags
    that must be excluded for this interpreter/platform combination.
    """
    def __init__(self, path, name=None, version=None):
        self.path = path
        self.interpreter = self._get_interpreter(path)
        if not name:
            # Name/version not given explicitly; ask the interpreter itself.
            name, version = self._get_name_and_version()
        self.name = name
        self.version = version
        self._robot_path = normpath(join(dirname(abspath(__file__)),
                                         '..', 'src', 'robot'))
    def _get_interpreter(self, path):
        # An existing file is used as-is; otherwise treat `path` as a
        # command line with arguments (e.g. 'py -2').
        return [path] if os.path.exists(path) else path.split()
    def _get_name_and_version(self):
        # Run `<interpreter> -V` and parse "<Name> <version>" from its output.
        try:
            output = subprocess.check_output(self.interpreter + ['-V'],
                                             stderr=subprocess.STDOUT)
        except (subprocess.CalledProcessError, OSError):
            raise ValueError('Invalid interpreter: %s' % self.path)
        name, version = output.split()[:2]
        version = '.'.join(version.split('.')[:2])  # keep only major.minor
        return name, version
    @property
    def excludes(self):
        # Tags of tests that cannot run on this interpreter/platform.
        if self.is_python and self.version == '2.6':
            yield 'no-python26'
            yield 'require-et13'
        if self.is_jython:
            yield 'no-jython'
            yield 'require-lxml'
        else:
            yield 'require-jython'
        if self.is_ironpython:
            yield 'no-ipy'
            yield 'require-et13'
            yield 'require-lxml'
            yield 'require-docutils'  # https://github.com/IronLanguages/main/issues/1230
        else:
            yield 'require-ipy'
        for exclude in self._platform_excludes:
            yield exclude
    @property
    def _platform_excludes(self):
        # Operating-system and Python-major-version specific exclusions.
        if self.is_py3:
            yield 'no-py3'
        else:
            yield 'no-py2'
        if self.is_windows:
            yield 'no-windows'
            if self.is_jython:
                yield 'no-windows-jython'
            if self.is_python and self.version == '2.6':
                yield 'no-windows-python26'
        if not self.is_windows:
            yield 'require-windows'
        if self.is_osx:
            yield 'no-osx'
            if self.is_python:
                yield 'no-osx-python'
    @property
    def is_python(self):
        return self.name == 'Python'
    @property
    def is_jython(self):
        return self.name == 'Jython'
    @property
    def is_ironpython(self):
        return self.name == 'IronPython'
    @property
    def is_py2(self):
        return self.version[0] == '2'
    @property
    def is_py3(self):
        return self.version[0] == '3'
    @property
    def is_linux(self):
        return 'linux' in sys.platform
    @property
    def is_osx(self):
        return sys.platform == 'darwin'
    @property
    def is_windows(self):
        return os.name == 'nt'
    @property
    def os(self):
        # Human-readable operating system name; falls back to sys.platform.
        for condition, name in [(self.is_linux, 'Linux'),
                                (self.is_osx, 'OS X'),
                                (self.is_windows, 'Windows')]:
            if condition:
                return name
        return sys.platform
    @property
    def runner(self):
        return self.interpreter + [join(self._robot_path, 'run.py')]
    @property
    def rebot(self):
        return self.interpreter + [join(self._robot_path, 'rebot.py')]
    @property
    def libdoc(self):
        return self.interpreter + [join(self._robot_path, 'libdoc.py')]
    @property
    def testdoc(self):
        return self.interpreter + [join(self._robot_path, 'testdoc.py')]
    @property
    def tidy(self):
        return self.interpreter + [join(self._robot_path, 'tidy.py')]
class StandaloneInterpreter(Interpreter):
    """The Robot Framework standalone JAR executed with ``java -jar``.

    Reports itself as a Jython-like interpreter (version defaults to 2.7);
    the entry points are subcommands of the jar instead of scripts.
    """
    def __init__(self, path, name=None, version=None):
        Interpreter.__init__(self, abspath(path), name or 'Standalone JAR',
                             version or '2.7')
    def _get_interpreter(self, path):
        interpreter = ['java', '-jar', path]
        classpath = os.environ.get('CLASSPATH')
        if classpath:
            # Extra libraries must be appended to the boot classpath to be
            # visible to code running from the jar.
            interpreter.insert(1, '-Xbootclasspath/a:%s' % classpath)
        return interpreter
    @property
    def excludes(self):
        # Fixed exclusion set; plus the usual platform-specific ones.
        for exclude in ['no-standalone', 'no-jython', 'require-lxml',
                        'require-docutils', 'require-ipy']:
            yield exclude
        for exclude in self._platform_excludes:
            yield exclude
    @property
    def is_python(self):
        return False
    @property
    def is_jython(self):
        # The jar embeds Jython, so behave like one.
        return True
    @property
    def is_ironpython(self):
        return False
    @property
    def runner(self):
        return self.interpreter + ['run']
    @property
    def rebot(self):
        return self.interpreter + ['rebot']
    @property
    def libdoc(self):
        return self.interpreter + ['libdoc']
    @property
    def testdoc(self):
        return self.interpreter + ['testdoc']
    @property
    def tidy(self):
        return self.interpreter + ['tidy']
clay23/lab4 | lib/werkzeug/contrib/testtools.py | 319 | 2449 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.testtools
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements extended wrappers for simplified testing.
`TestResponse`
A response wrapper which adds various cached attributes for
simplified assertions on various content types.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.utils import cached_property, import_string
from werkzeug.wrappers import Response
from warnings import warn
# Emitted at import time: this entire module is deprecated.
warn(DeprecationWarning('werkzeug.contrib.testtools is deprecated and '
                        'will be removed with Werkzeug 1.0'))
class ContentAccessors(object):
    """
    A mixin class for response objects that provides a couple of useful
    accessors (``xml``, ``lxml``, ``json``) for unittesting.

    Each accessor parses ``self.data`` lazily and caches the result.
    """

    @cached_property
    def xml(self):
        """Get an etree if possible."""
        if 'xml' not in self.mimetype:
            raise AttributeError(
                'Not a XML response (Content-Type: %s)'
                % self.mimetype)
        for module in ['xml.etree.ElementTree', 'ElementTree',
                       'elementtree.ElementTree']:
            etree = import_string(module, silent=True)
            if etree is not None:
                # Use `self.data` like the other accessors; the previous
                # `self.body` is not an attribute of werkzeug responses
                # and raised AttributeError at runtime.
                return etree.XML(self.data)
        raise RuntimeError('You must have ElementTree installed '
                           'to use TestResponse.xml')

    @cached_property
    def lxml(self):
        """Get an lxml etree if possible."""
        if ('html' not in self.mimetype and 'xml' not in self.mimetype):
            raise AttributeError('Not an HTML/XML response')
        from lxml import etree
        try:
            from lxml.html import fromstring
        except ImportError:
            fromstring = etree.HTML
        if self.mimetype == 'text/html':
            return fromstring(self.data)
        return etree.XML(self.data)

    @cached_property
    def json(self):
        """Get the result of simplejson.loads if possible."""
        if 'json' not in self.mimetype:
            raise AttributeError('Not a JSON response')
        try:
            from simplejson import loads
        except ImportError:
            from json import loads
        return loads(self.data)
class TestResponse(Response, ContentAccessors):
    """Pass this to `werkzeug.test.Client` for easier unittesting.

    Combines the normal ``Response`` API with the ``ContentAccessors``
    mixin so tests can read ``response.xml`` / ``response.lxml`` /
    ``response.json`` directly.
    """
| apache-2.0 |
mixlab/cuda-convnet2 | python_util/gpumodel.py | 175 | 14896 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
import os
from time import time, asctime, localtime, strftime
from util import *
from data import *
from options import *
from math import ceil, floor, sqrt
from data import DataProvider, dp_types
import sys
import shutil
import platform
from os import linesep as NL
from threading import Thread
import tempfile as tf
class ModelStateException(Exception):
    """Signals an invalid or unusable model state during initialization."""
    pass
class CheckpointWriter(Thread):
    """Background thread that persists a checkpoint dict atomically."""
    def __init__(self, path, dic):
        Thread.__init__(self)
        self.path = path
        self.dic = dic

    def run(self):
        checkpoint_dir = os.path.dirname(self.path)
        checkpoint_name = os.path.basename(self.path)
        # Serialize into a temporary file first so a crash never leaves a
        # half-written checkpoint at the final path.
        tmpfile = tf.NamedTemporaryFile(dir=os.path.dirname(checkpoint_dir), delete=False)
        pickle(tmpfile, self.dic)  # pickle() also closes the file
        # Atomically move it into place under the final name.
        os.rename(tmpfile.name, self.path)
        # Keep only the newest checkpoint in the directory.
        for entry in os.listdir(checkpoint_dir):
            if entry != checkpoint_name:
                os.remove(os.path.join(checkpoint_dir, entry))
# GPU Model interface
class IGPUModel:
    """Base class driving GPU model training (Python 2 code).

    Handles option parsing, checkpoint load/save, data providers and the
    train/test loop; subclasses supply the model specifics via
    init_model_state(), init_model_lib(), etc.
    """
    def __init__(self, model_name, op, load_dic, filename_options=[], dp_params={}):
        # NOTE(review): mutable default arguments ([] / {}) are shared
        # across calls; safe only while they are never mutated here.
        # these are input parameters
        self.model_name = model_name
        self.op = op
        self.options = op.options
        self.load_dic = load_dic
        self.filename_options = filename_options
        self.dp_params = dp_params
        self.device_ids = self.op.get_value('gpu')
        self.fill_excused_options()
        self.checkpoint_writer = None
        #assert self.op.all_values_given()
        for o in op.get_options_list():
            setattr(self, o.name, o.value)
        self.loaded_from_checkpoint = load_dic is not None
        # these are things that the model must remember but they're not input parameters
        if self.loaded_from_checkpoint:
            self.model_state = load_dic["model_state"]
            self.save_file = self.options["save_file_override"].value if self.options["save_file_override"].value_given else self.options['load_file'].value
            if not os.path.isdir(self.save_file) and os.path.exists(self.save_file):
                self.save_file = os.path.dirname(self.save_file)
            # print self.options["save_file_override"].value, self.save_file
        else:
            self.model_state = {}
            # Fresh run: derive a unique save directory name from the
            # model name, selected option values and a timestamp.
            self.save_file = self.options["save_file_override"].value if self.options["save_file_override"].value_given else os.path.join(self.options['save_path'].value, model_name + "_" + '_'.join(['%s_%s' % (char, self.options[opt].get_str_value()) for opt, char in filename_options]) + '_' + strftime('%Y-%m-%d_%H.%M.%S'))
            self.model_state["train_outputs"] = []
            self.model_state["test_outputs"] = []
            self.model_state["epoch"] = 1
            self.model_state["batchnum"] = self.train_batch_range[0]
        # print self.save_file
        self.init_data_providers()
        if load_dic:
            self.train_data_provider.advance_batch()
        # model state often requries knowledge of data provider, so it's initialized after
        try:
            self.init_model_state()
        except ModelStateException, e:
            print e
            sys.exit(1)
        for var, val in self.model_state.iteritems():
            setattr(self, var, val)
        self.import_model()
        self.init_model_lib()
    def import_model(self):
        # Imports the compiled C++ extension module named '_<model_name>'.
        print "========================="
        print "Importing %s C++ module" % ('_' + self.model_name)
        self.libmodel = __import__('_' + self.model_name)
    def fill_excused_options(self):
        # Hook for subclasses to fill in options excused from validation.
        pass
    def init_data_providers(self):
        self.dp_params['convnet'] = self
        try:
            self.test_data_provider = DataProvider.get_instance(self.data_path, self.test_batch_range,
                                                                type=self.dp_type, dp_params=self.dp_params, test=True)
            self.train_data_provider = DataProvider.get_instance(self.data_path, self.train_batch_range,
                                                                     self.model_state["epoch"], self.model_state["batchnum"],
                                                                     type=self.dp_type, dp_params=self.dp_params, test=False)
        except DataProviderException, e:
            print "Unable to create data provider: %s" % e
            self.print_data_providers()
            sys.exit()
    def init_model_state(self):
        # Hook for subclasses; may raise ModelStateException.
        pass
    def init_model_lib(self):
        # Hook for subclasses to initialize self.libmodel.
        pass
    def start(self):
        if self.test_only:
            self.test_outputs += [self.get_test_error()]
            self.print_test_results()
        else:
            self.train()
        self.cleanup()
        if self.force_save:
            self.save_state().join()
        sys.exit(0)
    def train(self):
        # Main training loop: overlaps batch loading with GPU computation.
        print "========================="
        print "Training %s" % self.model_name
        self.op.print_values()
        print "========================="
        self.print_model_state()
        print "Running on CUDA device(s) %s" % ", ".join("%d" % d for d in self.device_ids)
        print "Current time: %s" % asctime(localtime())
        print "Saving checkpoints to %s" % self.save_file
        print "========================="
        next_data = self.get_next_batch()
        while self.epoch <= self.num_epochs:
            data = next_data
            self.epoch, self.batchnum = data[0], data[1]
            self.print_iteration()
            sys.stdout.flush()
            compute_time_py = time()
            self.start_batch(data)
            # load the next batch while the current one is computing
            next_data = self.get_next_batch()
            batch_output = self.finish_batch()
            self.train_outputs += [batch_output]
            self.print_train_results()
            if self.get_num_batches_done() % self.testing_freq == 0:
                self.sync_with_host()
                self.test_outputs += [self.get_test_error()]
                self.print_test_results()
                self.print_test_status()
                self.conditional_save()
            self.print_elapsed_time(time() - compute_time_py)
    def cleanup(self):
        # Wait for any in-flight checkpoint write to finish.
        if self.checkpoint_writer is not None:
            self.checkpoint_writer.join()
            self.checkpoint_writer = None
    def print_model_state(self):
        pass
    def get_num_batches_done(self):
        return len(self.train_batch_range) * (self.epoch - 1) + self.batchnum - self.train_batch_range[0] + 1
    def get_next_batch(self, train=True):
        dp = self.train_data_provider
        if not train:
            dp = self.test_data_provider
        return self.parse_batch_data(dp.get_next_batch(), train=train)
    def parse_batch_data(self, batch_data, train=True):
        # Returns (epoch, batchnum, data).
        return batch_data[0], batch_data[1], batch_data[2]['data']
    def start_batch(self, batch_data, train=True):
        self.libmodel.startBatch(batch_data[2], not train)
    def finish_batch(self):
        return self.libmodel.finishBatch()
    def print_iteration(self):
        print "\t%d.%d..." % (self.epoch, self.batchnum),
    def print_elapsed_time(self, compute_time_py):
        print "(%.3f sec)" % (compute_time_py)
    def print_train_results(self):
        batch_error = self.train_outputs[-1][0]
        if not (batch_error > 0 and batch_error < 2e20):
            # Error diverged (or is NaN, which fails both comparisons).
            print "Crazy train error: %.6f" % batch_error
            self.cleanup()
        print "Train error: %.6f " % (batch_error),
    def print_test_results(self):
        batch_error = self.test_outputs[-1][0]
        print "%s\t\tTest error: %.6f" % (NL, batch_error),
    def print_test_status(self):
        status = (len(self.test_outputs) == 1 or self.test_outputs[-1][0] < self.test_outputs[-2][0]) and "ok" or "WORSE"
        print status,
    def sync_with_host(self):
        if self.checkpoint_writer is not None:
            self.checkpoint_writer.join()
            self.checkpoint_writer = None
        self.libmodel.syncWithHost()
    def conditional_save(self):
        # Save a checkpoint only while the test error stays reasonable.
        batch_error = self.test_outputs[-1][0]
        if batch_error > 0 and batch_error < self.max_test_err:
            self.save_state()
        else:
            print "\tTest error > %g, not saving." % self.max_test_err,
    def aggregate_test_outputs(self, test_outputs):
        test_error = tuple([sum(t[r] for t in test_outputs) / (1 if self.test_one else len(self.test_batch_range)) for r in range(len(test_outputs[-1]))])
        return test_error
    def get_test_error(self):
        next_data = self.get_next_batch(train=False)
        test_outputs = []
        while True:
            data = next_data
            start_time_test = time()
            self.start_batch(data, train=False)
            load_next = (not self.test_one or self.test_only) and data[1] < self.test_batch_range[-1]
            if load_next: # load next batch
                next_data = self.get_next_batch(train=False)
            test_outputs += [self.finish_batch()]
            if self.test_only: # Print the individual batch results for safety
                print "batch %d: %s" % (data[1], str(test_outputs[-1])),
                self.print_elapsed_time(time() - start_time_test)
            if not load_next:
                break
            sys.stdout.flush()
        return self.aggregate_test_outputs(test_outputs)
    def set_var(self, var_name, var_val):
        setattr(self, var_name, var_val)
        self.model_state[var_name] = var_val
        return var_val
    def get_var(self, var_name):
        return self.model_state[var_name]
    def has_var(self, var_name):
        return var_name in self.model_state
    def save_state(self):
        # Snapshot attributes back into model_state and write the
        # checkpoint on a background thread; returns the writer thread.
        for att in self.model_state:
            if hasattr(self, att):
                self.model_state[att] = getattr(self, att)
        dic = {"model_state": self.model_state,
               "op": self.op}
        checkpoint_file = "%d.%d" % (self.epoch, self.batchnum)
        checkpoint_file_full_path = os.path.join(self.save_file, checkpoint_file)
        if not os.path.exists(self.save_file):
            os.makedirs(self.save_file)
        assert self.checkpoint_writer is None
        self.checkpoint_writer = CheckpointWriter(checkpoint_file_full_path, dic)
        self.checkpoint_writer.start()
        print "-------------------------------------------------------"
        print "Saved checkpoint to %s" % self.save_file
        print "=======================================================",
        return self.checkpoint_writer
    def get_progress(self):
        num_batches_total = self.num_epochs * len(self.train_batch_range)
        return min(1.0, max(0.0, float(self.get_num_batches_done()-1) / num_batches_total))
    @staticmethod
    def load_checkpoint(load_dir):
        # A directory loads its newest checkpoint; a file loads directly.
        if os.path.isdir(load_dir):
            return unpickle(os.path.join(load_dir, sorted(os.listdir(load_dir), key=alphanum_key)[-1]))
        return unpickle(load_dir)
    @staticmethod
    def get_options_parser():
        op = OptionsParser()
        op.add_option("load-file", "load_file", StringOptionParser, "Load file", default="", excuses=OptionsParser.EXCUSE_ALL)
        op.add_option("save-path", "save_path", StringOptionParser, "Save path", excuses=['save_file_override'])
        op.add_option("save-file", "save_file_override", StringOptionParser, "Save file override", excuses=['save_path'])
        op.add_option("train-range", "train_batch_range", RangeOptionParser, "Data batch range: training")
        op.add_option("test-range", "test_batch_range", RangeOptionParser, "Data batch range: testing")
        op.add_option("data-provider", "dp_type", StringOptionParser, "Data provider", default="default")
        op.add_option("test-freq", "testing_freq", IntegerOptionParser, "Testing frequency", default=25)
        op.add_option("epochs", "num_epochs", IntegerOptionParser, "Number of epochs", default=500)
        op.add_option("data-path", "data_path", StringOptionParser, "Data path")
        op.add_option("max-test-err", "max_test_err", FloatOptionParser, "Maximum test error for saving")
        op.add_option("test-only", "test_only", BooleanOptionParser, "Test and quit?", default=0)
        op.add_option("test-one", "test_one", BooleanOptionParser, "Test on one batch at a time?", default=1)
        op.add_option("force-save", "force_save", BooleanOptionParser, "Force save before quitting", default=0)
        op.add_option("gpu", "gpu", ListOptionParser(IntegerOptionParser), "GPU override")
        return op
    @staticmethod
    def print_data_providers():
        print "Available data providers:"
        for dp, desc in dp_types.iteritems():
            print "    %s: %s" % (dp, desc)
    @staticmethod
    def parse_options(op):
        # Parses command-line options, merging with a loaded checkpoint's
        # options when one is specified; returns (op, load_dic).
        try:
            load_dic = None
            options = op.parse()
            load_location = None
            # print options['load_file'].value_given, options['save_file_override'].value_given
            # print options['save_file_override'].value
            if options['load_file'].value_given:
                load_location = options['load_file'].value
            elif options['save_file_override'].value_given and os.path.exists(options['save_file_override'].value):
                load_location = options['save_file_override'].value
            if load_location is not None:
                load_dic = IGPUModel.load_checkpoint(load_location)
                old_op = load_dic["op"]
                old_op.merge_from(op)
                op = old_op
            op.eval_expr_defaults()
            return op, load_dic
        except OptionMissingException, e:
            print e
            op.print_usage()
        except OptionException, e:
            print e
        except UnpickleError, e:
            print "Error loading checkpoint:"
            print e
        sys.exit()
| apache-2.0 |
yg257/Pangea | lib/boto-2.34.0/boto/glacier/job.py | 153 | 7999 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import math
import socket
from boto.glacier.exceptions import TreeHashDoesNotMatchError, \
DownloadArchiveError
from boto.glacier.utils import tree_hash_from_str
class Job(object):
    """A Glacier job (archive retrieval or inventory retrieval), with
    helpers for downloading its output and verifying tree hashes.
    """

    #: Default download chunk size: 4 MB.
    DefaultPartSize = 4 * 1024 * 1024

    #: (response key, attribute name, default) triples used to populate
    #: attributes from a describe-job response dictionary.
    ResponseDataElements = (('Action', 'action', None),
                            ('ArchiveId', 'archive_id', None),
                            ('ArchiveSizeInBytes', 'archive_size', 0),
                            ('Completed', 'completed', False),
                            ('CompletionDate', 'completion_date', None),
                            ('CreationDate', 'creation_date', None),
                            ('InventorySizeInBytes', 'inventory_size', 0),
                            ('JobDescription', 'description', None),
                            ('JobId', 'id', None),
                            ('SHA256TreeHash', 'sha256_treehash', None),
                            ('SNSTopic', 'sns_topic', None),
                            ('StatusCode', 'status_code', None),
                            ('StatusMessage', 'status_message', None),
                            ('VaultARN', 'arn', None))

    def __init__(self, vault, response_data=None):
        self.vault = vault
        if response_data:
            for response_name, attr_name, default in self.ResponseDataElements:
                setattr(self, attr_name, response_data[response_name])
        else:
            # No response data: fall back to the documented defaults.
            for response_name, attr_name, default in self.ResponseDataElements:
                setattr(self, attr_name, default)

    def __repr__(self):
        return 'Job(%s)' % self.arn

    def get_output(self, byte_range=None, validate_checksum=False):
        """
        This operation downloads the output of the job.  Depending on
        the job type you specified when you initiated the job, the
        output will be either the content of an archive or a vault
        inventory.

        You can download all the job output or download a portion of
        the output by specifying a byte range. In the case of an
        archive retrieval job, depending on the byte range you
        specify, Amazon Glacier returns the checksum for the portion
        of the data. You can compute the checksum on the client and
        verify that the values match to ensure the portion you
        downloaded is the correct data.

        :type byte_range: tuple
        :param byte_range: A tuple of integers specifying the slice (in bytes)
            of the archive you want to receive

        :type validate_checksum: bool
        :param validate_checksum: Specify whether or not to validate
            the associated tree hash.  If the response does not contain
            a TreeHash, then no checksum will be verified.

        :raises TreeHashDoesNotMatchError: if validation is requested and
            the computed tree hash differs from the one in the response.
        """
        response = self.vault.layer1.get_job_output(self.vault.name,
                                                    self.id,
                                                    byte_range)
        if validate_checksum and 'TreeHash' in response:
            data = response.read()
            actual_tree_hash = tree_hash_from_str(data)
            if response['TreeHash'] != actual_tree_hash:
                raise TreeHashDoesNotMatchError(
                    "The calculated tree hash %s does not match the "
                    "expected tree hash %s for the byte range %s" % (
                        actual_tree_hash, response['TreeHash'], byte_range))
        return response

    def _calc_num_chunks(self, chunk_size):
        # Number of chunk_size pieces needed to cover the whole archive.
        return int(math.ceil(self.archive_size / float(chunk_size)))

    def download_to_file(self, filename, chunk_size=DefaultPartSize,
                         verify_hashes=True, retry_exceptions=(socket.error,)):
        """Download an archive to a file by name.

        :type filename: str
        :param filename: The name of the file where the archive
            contents will be saved.

        :type chunk_size: int
        :param chunk_size: The chunk size to use when downloading
            the archive.

        :type verify_hashes: bool
        :param verify_hashes: Indicates whether or not to verify
            the tree hashes for each downloaded chunk.
        """
        num_chunks = self._calc_num_chunks(chunk_size)
        with open(filename, 'wb') as output_file:
            self._download_to_fileob(output_file, num_chunks, chunk_size,
                                     verify_hashes, retry_exceptions)

    def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
                            verify_hashes=True,
                            retry_exceptions=(socket.error,)):
        """Download an archive to a file object.

        :type output_file: file
        :param output_file: The file object where the archive
            contents will be saved.

        :type chunk_size: int
        :param chunk_size: The chunk size to use when downloading
            the archive.

        :type verify_hashes: bool
        :param verify_hashes: Indicates whether or not to verify
            the tree hashes for each downloaded chunk.
        """
        num_chunks = self._calc_num_chunks(chunk_size)
        self._download_to_fileob(output_file, num_chunks, chunk_size,
                                 verify_hashes, retry_exceptions)

    def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
                            retry_exceptions):
        # Download the archive chunk by chunk, optionally verifying each
        # chunk's tree hash before writing it out.
        for i in range(num_chunks):
            byte_range = ((i * chunk_size), ((i + 1) * chunk_size) - 1)
            data, expected_tree_hash = self._download_byte_range(
                byte_range, retry_exceptions)
            if verify_hashes:
                actual_tree_hash = tree_hash_from_str(data)
                if expected_tree_hash != actual_tree_hash:
                    raise TreeHashDoesNotMatchError(
                        "The calculated tree hash %s does not match the "
                        "expected tree hash %s for the byte range %s" % (
                            actual_tree_hash, expected_tree_hash, byte_range))
            fileobj.write(data)

    def _download_byte_range(self, byte_range, retry_exceptions):
        # You can occasionally get socket.errors when downloading
        # chunks from Glacier, so each chunk can be retried up
        # to 5 times.  Track the last error explicitly: the previous
        # for/else implementation referenced the except-clause variable
        # after the loop, which is a NameError on Python 3.
        last_error = None
        for _ in range(5):
            try:
                response = self.get_output(byte_range)
                data = response.read()
                expected_tree_hash = response['TreeHash']
                return data, expected_tree_hash
            except retry_exceptions as e:
                last_error = e
        raise DownloadArchiveError("There was an error downloading "
                                   "byte range %s: %s" % (byte_range,
                                                          last_error))
| apache-2.0 |
bharathsreenivas/azure-quickstart-templates | elasticsearch-centos-3node/elasticinstall.py | 168 | 2840 | #!/usr/bin/env python
import subprocess
import socket
import sys
clustername = sys.argv[1]
number_nodes = sys.argv[2]
accountname = sys.argv[3]
accountkey = sys.argv[4]
print"inputs:\n"
print "clustername = " + clustername
print "accontname = " + accountname
print "accountkey = " + accountkey
hostname = socket.gethostname()
print "hostname: " + hostname
hostbase = "10.0.2.1"
print "hostbase: " + hostbase
def RunCommand(cmd):
ret = subprocess.check_output(cmd, shell=True)
print ret
return
cmds = ["yum -y install nano",
"yum -y install java-1.8.0-openjdk.x86_64",
"curl 'https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.3.noarch.rpm' -o 'elasticsearch-1.7.3.noarch.rpm'",
"rpm -ivh elasticsearch-1.7.3.noarch.rpm",
"systemctl enable elasticsearch.service",
"/usr/share/elasticsearch/bin/plugin -install royrusso/elasticsearch-HQ",
"/usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.8.2"]
print "start running installs"
for cmd in cmds:
RunCommand(cmd)
print "prep data disk for use"
cmds=["sfdisk /dev/sdc < sdc.layout",
"mkfs -t ext4 /dev/sdc1",
"mkdir /data",
"mount /dev/sdc1 /data"]
for cmd in cmds:
RunCommand(cmd)
temp = subprocess.check_output("blkid /dev/sdc1", shell=True)
uuid = temp[17:53]
with open("/etc/fstab", "a") as fstab:
fstab.write("UUID="+uuid+"\t/data\text4\tdefaults\t1\t2\n")
print RunCommand("chmod go+w /data")
datapath = "/data/elastic"
cmds=["mkdir " + datapath,
"chown -R elasticsearch:elasticsearch " + datapath,
"chmod 755 " + datapath]
for cmd in cmds:
RunCommand(cmd)
#re-write conf for heap
sysconf = '/etc/sysconfig/elasticsearch'
RunCommand("mv " + sysconf + " " + sysconf + ".bak")
heapsize="2g"
sysconfig = open(sysconf, 'w')
sysconfig.truncate()
sysconfig.write("ES_HEAP_SIZE=" + heapsize + "\n")
sysconfig.close()
print "start writing elastic config"
# write config
hosts=""
for n in range(0, int(number_nodes)):
hosts=hosts+hostbase+str(n)+","
hosts=hosts[:-1]
filename = '/etc/elasticsearch/elasticsearch.yml'
RunCommand("mv " + filename + " " + filename + ".bak")
config = open(filename, 'w')
config.truncate()
config.write("cluster.name: " + clustername + "\n")
config.write("node.name: " + hostname + "\n")
config.write("path.data: " + datapath + "\n")
config.write("discovery.zen.ping.multicast.enabled: false\n")
config.write("discovery.zen.ping.unicast.hosts: " + hosts + "\n")
config.write("node.master: true\n")
config.write("node.data: true\n")
config.write("cloud:\n")
config.write(" azure:\n")
config.write(" storage:\n")
config.write(" account: " + accountname + "\n")
config.write(" key: " + accountkey + "\n")
config.close()
print "finished writing config file"
RunCommand("systemctl start elasticsearch")
print "elastic install script finished"
| mit |
Joergen/zamboni | lib/misc/tests/test_log.py | 1 | 3825 | import logging
import json
from django.conf import settings
from nose.tools import eq_
from metlog.config import client_from_dict_config
import amo.tests
import commonware.log
from lib.log_settings_base import error_fmt
# dictConfig-style logging configuration: routes ERROR-level records
# from 'test.lib.misc.logging' through commonware's formatter to the
# ErrorSyslogHandler.
# NOTE(review): cfg is not referenced by the visible code in this
# module; confirm it is consumed elsewhere before removing.
cfg = {
    'version': 1,
    'formatters': {
        'error': {
            '()': commonware.log.Formatter,
            'datefmt': '%H:%M:%S',
            'format': ('%s: [%%(USERNAME)s][%%(REMOTE_ADDR)s] %s'
                       % (settings.SYSLOG_TAG, error_fmt)),
        },
    },
    'handlers': {
        'test_syslog': {
            'class': 'lib.misc.admin_log.ErrorSyslogHandler',
            'formatter': 'error',
        },
    },
    'loggers': {
        'test.lib.misc.logging': {
            'handlers': ['test_syslog'],
            'level': 'ERROR',
            'propagate': False,
        },
    },
}
class TestMetlogStdLibLogging(amo.tests.TestCase):
    """Exercise a metlog client configured with a stdlib-logging sender.

    Messages sent through metlog should surface as ordinary logging
    records on the 'z.metlog' logger, where a BufferingHandler captures
    them for inspection.
    """

    def setUp(self):
        METLOG_CONF = {
            'sender': {
                'class': 'metlog.senders.logging.StdLibLoggingSender',
                'logger_name': 'z.metlog',
            }
        }
        self.metlog = client_from_dict_config(METLOG_CONF)
        self.logger = logging.getLogger('z.metlog')
        # When logging.config.dictConfig is used to configure logging
        # with a 'one-shot' config dictionary, any previously
        # instantiated singleton loggers (ie: all old loggers not in
        # the new config) will be explicitly disabled.
        # (This explanation used to be a bare string expression sitting
        # mid-function — a no-op statement, not a docstring — so it has
        # been converted into this comment.)
        self.logger.disabled = False
        self._orig_handlers = self.logger.handlers
        # NOTE(review): assumes logging.handlers has been imported by a
        # dependency; 'import logging' alone does not guarantee it.
        self.handler = logging.handlers.BufferingHandler(65536)
        self.logger.handlers = [self.handler]

    def tearDown(self):
        # Restore the original handlers so other tests are unaffected.
        self.logger.handlers = self._orig_handlers

    def test_oldstyle_sends_msg(self):
        """error/info/warn pass through to logging; debug is filtered."""
        msg = 'error'
        self.metlog.error(msg)
        logrecord = self.handler.buffer[-1]
        self.assertEqual(logrecord.msg, msg)
        self.assertEqual(logrecord.levelname, 'ERROR')

        msg = 'info'
        self.metlog.info(msg)
        logrecord = self.handler.buffer[-1]
        self.assertEqual(logrecord.msg, msg)
        self.assertEqual(logrecord.levelname, 'INFO')

        msg = 'warn'
        self.metlog.warn(msg)
        logrecord = self.handler.buffer[-1]
        self.assertEqual(logrecord.msg, msg)
        self.assertEqual(logrecord.levelname, 'WARNING')

        # debug shouldn't log
        msg = 'debug'
        self.metlog.debug(msg)
        logrecord = self.handler.buffer[-1]
        self.assertNotEqual(logrecord.msg, msg)
        self.assertNotEqual(logrecord.levelname, 'DEBUG')

    def test_other_sends_json(self):
        """Non-oldstyle metlog messages arrive as JSON on the logger."""
        timer = 'footimer'
        elapsed = 4
        self.metlog.timer_send(timer, elapsed)
        logrecord = self.handler.buffer[-1]
        self.assertEqual(logrecord.levelname, 'INFO')
        msg = json.loads(logrecord.msg)
        self.assertEqual(msg['type'], 'timer')
        self.assertEqual(msg['payload'], str(elapsed))
        self.assertEqual(msg['fields']['name'], timer)
class TestRaven(amo.tests.TestCase):
    """Verify that metlog.raven() captures the active exception."""

    def setUp(self):
        """
        We need to set the settings.METLOG instance to use a
        DebugCaptureSender so that we can inspect the sent messages.
        """
        metlog = settings.METLOG
        METLOG_CONF = {
            'logger': 'zamboni',
            'sender': {'class': 'metlog.senders.DebugCaptureSender'},
        }
        # client_from_dict_config is already imported at module level;
        # the redundant (shadowing) local import was removed.
        self.metlog = client_from_dict_config(METLOG_CONF, metlog)

    def test_send_raven(self):
        try:
            1 / 0
        except ZeroDivisionError:
            # Narrowed from a bare 'except:' — the division above can
            # only raise ZeroDivisionError, and a bare except would also
            # swallow KeyboardInterrupt/SystemExit.
            self.metlog.raven('blah')
        eq_(len(self.metlog.sender.msgs), 1)
        msg = json.loads(self.metlog.sender.msgs[0])
        eq_(msg['type'], 'sentry')
| bsd-3-clause |
deepinsight/Deformable-ConvNets | lib/dataset/pycocotools/coco.py | 5 | 18005 | __author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
    """In-memory index over a COCO annotation file (Python 2 code).

    Wraps the raw JSON dataset and builds lookup tables from image,
    annotation and category ids to their records.  The list-typed
    default arguments below are read-only sentinels (never mutated).
    """
    def __init__(self, annotation_file=None):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :param image_folder (str): location to the folder that hosts images.
        :return:
        """
        # load dataset
        self.dataset = {}
        self.anns = []
        self.imgToAnns = {}
        self.catToImgs = {}
        self.imgs = {}
        self.cats = {}
        if not annotation_file == None:
            print 'loading annotations into memory...'
            tic = time.time()
            dataset = json.load(open(annotation_file, 'r'))
            print 'Done (t=%0.2fs)'%(time.time()- tic)
            self.dataset = dataset
            self.createIndex()

    def createIndex(self):
        """Build the id -> record lookup tables from self.dataset."""
        # create index
        print 'creating index...'
        anns = {}
        imgToAnns = {}
        catToImgs = {}
        cats = {}
        imgs = {}
        if 'annotations' in self.dataset:
            imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
            anns = {ann['id']: [] for ann in self.dataset['annotations']}
            for ann in self.dataset['annotations']:
                imgToAnns[ann['image_id']] += [ann]
                anns[ann['id']] = ann
        if 'images' in self.dataset:
            imgs = {im['id']: {} for im in self.dataset['images']}
            for img in self.dataset['images']:
                imgs[img['id']] = img
        if 'categories' in self.dataset:
            cats = {cat['id']: [] for cat in self.dataset['categories']}
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat
            if 'annotations' in self.dataset and 'categories' in self.dataset:
                catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
                for ann in self.dataset['annotations']:
                    catToImgs[ann['category_id']] += [ann['image_id']]
        print 'index created!'

        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def info(self):
        """
        Print information about the annotation file.
        :return:
        """
        for key, value in self.dataset['info'].items():
            print '%s: %s'%(key, value)

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
        """
        Get ann ids that satisfy given filter conditions. default skips that filter
        :param imgIds  (int array)   : get anns for given imgs
               catIds  (int array)   : get anns for given cats
               areaRng (float array) : get anns for given area range (e.g. [0 inf])
               iscrowd (boolean)     : get anns for given crowd label (False or True)
        :return: ids (int array)     : integer array of ann ids
        """
        # Scalars are promoted to single-element lists.
        imgIds = imgIds if type(imgIds) == list else [imgIds]
        catIds = catIds if type(catIds) == list else [catIds]
        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            anns = self.dataset['annotations']
        else:
            if not len(imgIds) == 0:
                # this can be changed by defaultdict
                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.dataset['annotations']
            anns = anns if len(catIds)  == 0 else [ann for ann in anns if ann['category_id'] in catIds]
            anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
        if not iscrowd == None:
            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
        else:
            ids = [ann['id'] for ann in anns]
        return ids

    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
        """
        filtering parameters. default skips that filter.
        :param catNms (str array)  : get cats for given cat names
        :param supNms (str array)  : get cats for given supercategory names
        :param catIds (int array)  : get cats for given cat ids
        :return: ids (int array)   : integer array of cat ids
        """
        catNms = catNms if type(catNms) == list else [catNms]
        supNms = supNms if type(supNms) == list else [supNms]
        catIds = catIds if type(catIds) == list else [catIds]
        if len(catNms) == len(supNms) == len(catIds) == 0:
            cats = self.dataset['categories']
        else:
            cats = self.dataset['categories']
            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
        ids = [cat['id'] for cat in cats]
        return ids

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array) : get imgs for given ids
        :param catIds (int array) : get imgs with all given cats
        :return: ids (int array)  : integer array of img ids
        '''
        imgIds = imgIds if type(imgIds) == list else [imgIds]
        catIds = catIds if type(catIds) == list else [catIds]
        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            ids = set(imgIds)
            # Intersect the image sets of all requested categories.
            for i, catId in enumerate(catIds):
                if i == 0 and len(ids) == 0:
                    ids = set(self.catToImgs[catId])
                else:
                    ids &= set(self.catToImgs[catId])
        return list(ids)

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)      : integer ids specifying anns
        :return: anns (object array) : loaded ann objects
        """
        if type(ids) == list:
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    def loadCats(self, ids=[]):
        """
        Load cats with the specified ids.
        :param ids (int array)      : integer ids specifying cats
        :return: cats (object array) : loaded cat objects
        """
        if type(ids) == list:
            return [self.cats[id] for id in ids]
        elif type(ids) == int:
            return [self.cats[ids]]

    def loadImgs(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)      : integer ids specifying img
        :return: imgs (object array) : loaded img objects
        """
        if type(ids) == list:
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    def showAnns(self, anns):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if 'segmentation' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        if datasetType == 'instances':
            ax = plt.gca()
            polygons = []
            color = []
            for ann in anns:
                c = np.random.random((1, 3)).tolist()[0]
                if type(ann['segmentation']) == list:
                    # polygon
                    for seg in ann['segmentation']:
                        poly = np.array(seg).reshape((len(seg)/2, 2))
                        polygons.append(Polygon(poly, True,alpha=0.4))
                        color.append(c)
                else:
                    # mask
                    t = self.imgs[ann['image_id']]
                    if type(ann['segmentation']['counts']) == list:
                        rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
                    else:
                        rle = [ann['segmentation']]
                    m = mask.decode(rle)
                    img = np.ones( (m.shape[0], m.shape[1], 3) )
                    if ann['iscrowd'] == 1:
                        # Crowd regions are drawn in a fixed green tint.
                        color_mask = np.array([2.0,166.0,101.0])/255
                    if ann['iscrowd'] == 0:
                        color_mask = np.random.random((1, 3)).tolist()[0]
                    for i in range(3):
                        img[:,:,i] = color_mask[i]
                    ax.imshow(np.dstack( (img, m*0.5) ))
            p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print ann['caption']

    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param resFile (str)     : file name of result file
        :return: res (obj)       : result api object
        """
        res = COCO()
        res.dataset['images'] = [img for img in self.dataset['images']]
        # res.dataset['info'] = copy.deepcopy(self.dataset['info'])
        # res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
        print 'Loading and preparing results...     '
        tic = time.time()
        anns    = json.load(open(resFile))
        assert type(anns) == list, 'results in not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]
        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
               'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
            for id, ann in enumerate(anns):
                ann['id'] = id+1
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                # Derive corner coordinates from the [x, y, w, h] box.
                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
                if not 'segmentation' in ann:
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2]*bb[3]
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'segmentation' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                # now only support compressed RLE format as segmentation results
                ann['area'] = mask.area([ann['segmentation']])[0]
                if not 'bbox' in ann:
                    ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
                ann['id'] = id+1
                ann['iscrowd'] = 0
        print 'DONE (t=%0.2fs)'%(time.time()- tic)

        res.dataset['annotations'] = anns
        res.createIndex()
        return res

    def download( self, tarDir = None, imgIds = [] ):
        '''
        Download COCO images from mscoco.org server.
        :param tarDir (str): COCO results directory name
               imgIds (list): images to be downloaded
        :return:
        '''
        if tarDir is None:
            print 'Please specify target directory'
            return -1
        if len(imgIds) == 0:
            imgs = self.imgs.values()
        else:
            imgs = self.loadImgs(imgIds)
        N = len(imgs)
        if not os.path.exists(tarDir):
            os.makedirs(tarDir)
        for i, img in enumerate(imgs):
            tic = time.time()
            fname = os.path.join(tarDir, img['file_name'])
            if not os.path.exists(fname):
                # urllib.urlretrieve is Python 2 only (urllib.request in 3).
                urllib.urlretrieve(img['coco_url'], fname)
            print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)

    @staticmethod
    def decodeMask(R):
        """
        Decode binary mask M encoded via run-length encoding.
        :param R (object RLE)    : run-length encoding of binary mask
        :return: M (bool 2D array) : decoded binary mask
        """
        N = len(R['counts'])
        M = np.zeros( (R['size'][0]*R['size'][1], ))
        n = 0
        val = 1
        for pos in range(N):
            val = not val
            for c in range(R['counts'][pos]):
                # NOTE(review): the bare expression below is a no-op;
                # it looks like leftover debugging code.
                R['counts'][pos]
                M[n] = val
                n += 1
        return M.reshape((R['size']), order='F')

    @staticmethod
    def encodeMask(M):
        """
        Encode binary mask M using run-length encoding.
        :param M (bool 2D array)  : binary mask to encode
        :return: R (object RLE)   : run-length encoding of binary mask
        """
        [h, w] = M.shape
        M = M.flatten(order='F')
        N = len(M)
        counts_list = []
        pos = 0
        # counts
        counts_list.append(1)
        # Each True/False transition starts a new run.
        diffs = np.logical_xor(M[0:N-1], M[1:N])
        for diff in diffs:
            if diff:
                pos +=1
                counts_list.append(1)
            else:
                counts_list[pos] += 1
        # if array starts from 1. start with 0 counts for 0
        if M[0] == 1:
            counts_list = [0] + counts_list
        return {'size':      [h, w],
               'counts': counts_list ,
               }

    @staticmethod
    def segToMask( S, h, w ):
        """
        Convert polygon segmentation to binary mask.
        :param   S (float array)   : polygon segmentation mask
        :param   h (int)           : target mask height
        :param   w (int)           : target mask width
        :return: M (bool 2D array) : binary mask
        """
        M = np.zeros((h,w), dtype=np.bool)
        for s in S:
            N = len(s)
            # Polygon vertices alternate x,y; clip to the mask bounds.
            rr, cc = polygon(np.array(s[1:N:2]).clip(max=h-1), \
                             np.array(s[0:N:2]).clip(max=w-1)) # (y, x)
            M[rr, cc] = 1
        return M

    def annToRLE(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: binary mask (numpy 2D array)
        """
        t = self.imgs[ann['image_id']]
        h, w = t['height'], t['width']
        segm = ann['segmentation']
        if type(segm) == list:
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = mask.frPyObjects(segm, h, w)
            rle = mask.merge(rles)
        elif type(segm['counts']) == list:
            # uncompressed RLE
            rle = mask.frPyObjects(segm, h, w)
        else:
            # rle
            rle = ann['segmentation']
        return rle

    def annToMask(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
        :return: binary mask (numpy 2D array)
        """
        rle = self.annToRLE(ann)
        m = mask.decode(rle)
        return m
| apache-2.0 |
janslow/boto | boto/rds/event.py | 220 | 1876 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Event(object):
    """A single RDS event record.

    Instances are populated by boto's SAX response parser, which feeds
    XML elements through startElement/endElement.
    """

    # XML element name -> attribute under which its text is stored.
    _XML_FIELDS = {
        'SourceIdentifier': 'source_identifier',
        'SourceType': 'source_type',
        'Message': 'message',
        'Date': 'date',
    }

    def __init__(self, connection=None):
        self.connection = connection
        self.message = None
        self.source_identifier = None
        self.source_type = None
        self.engine = None
        self.date = None

    def __repr__(self):
        return '"%s"' % self.message

    def startElement(self, name, attrs, connection):
        # No nested structures to handle for events.
        pass

    def endElement(self, name, value, connection):
        attr = self._XML_FIELDS.get(name)
        if attr is None:
            # Unknown elements are stored under their raw XML name.
            setattr(self, name, value)
        else:
            setattr(self, attr, value)
| mit |
rsjudge17/romraider | 3rdparty/IzPack/utils/wrappers/izpack2exe/izpack2exe.py | 8 | 3448 | #!/usr/bin/env python
# ........................................................................... #
#
# IzPack - Copyright 2007, 2008 Julien Ponge, All Rights Reserved.
#
# http://izpack.org/
# http://izpack.codehaus.org/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ........................................................................... #
import os
import sys
from shutil import *
from optparse import OptionParser
def parse_options():
    """Parse the command-line options for the IzPack-to-EXE wrapper.

    Exits via OptionParser.error() when no installer JAR was supplied.
    """
    opt_parser = OptionParser()
    opt_parser.add_option("--file", action="append", dest="file",
                          help="The installer JAR file / files")
    opt_parser.add_option("--output", action="store", dest="output",
                          default="setup.exe",
                          help="The executable file")
    opt_parser.add_option("--with-7z", action="store", dest="p7z",
                          default="7za",
                          help="Path to the 7-Zip executable")
    opt_parser.add_option("--with-upx", action="store", dest="upx",
                          default="upx",
                          help="Path to the UPX executable")
    opt_parser.add_option("--no-upx", action="store_true", dest="no_upx",
                          default=False,
                          help="Do not use UPX to further compress the output")
    opt_parser.add_option("--launch-file", action="store", dest="launch",
                          default="launcher.exe",
                          help="File to launch after extract")
    options, _args = opt_parser.parse_args()
    if options.file is None:
        opt_parser.error("no installer file has been given")
    return options
def create_exe(settings):
    # Build a self-extracting Windows installer in four steps:
    #   1. pack the installer JAR(s) into installer.7z,
    #   2. write the 7-Zip SFX config naming the file to run on extract,
    #   3. concatenate SFX stub + config + archive into the output exe,
    #   4. optionally compress the result with UPX.
    # With a single input JAR, that JAR is launched after extraction;
    # otherwise the configured --launch-file is used.
    filename = os.path.basename(settings.launch)
    if(len(settings.file) == 1):
        filename = os.path.basename(settings.file[0])
    files = " ".join(settings.file);
    # NOTE(review): paths are interpolated into shell commands without
    # quoting; file names containing spaces or shell metacharacters
    # will break or be misinterpreted by os.system.
    p7z = '"%s" a -t7z -mx=9 -ms=off installer.7z %s' % (settings.p7z, files)
    os.system(p7z)
    # NOTE(review): text mode ('w') may translate the explicit \r\n on
    # Windows; 'wb' would be safer for this config format.
    config = open('config.txt', 'w')
    config.write(';!@Install@!UTF-8!\r\n')
    config.write('Title="IzPack"\r\n')
    config.write('Progress="yes"\r\n')
    config.write('ExecuteFile="%s"\r\n' % filename)
    config.write(';!@InstallEnd@!\r\n')
    config.close()
    # Locate the SFX stub next to this script (default '7za' case) or
    # next to the user-supplied 7-Zip binary.
    if settings.p7z == '7za':
        sfx = os.path.join(os.path.dirname(sys.argv[0]), '7zS.sfx')
    else:
        sfx = os.path.join(os.path.dirname(settings.p7z), '7zS.sfx')
    files = [sfx, 'config.txt', 'installer.7z']
    output = open(settings.output, 'wb')
    for f in files:
        in_file = open(f, 'rb')
        copyfileobj(in_file, output, 2048)
        in_file.close()
    output.close()
    if (not settings.no_upx):
        upx = '"%s" --ultra-brute %s' % (settings.upx, settings.output)
        os.system(upx)
    # Clean up the intermediate artifacts.
    os.remove('config.txt')
    os.remove('installer.7z')
def main():
    # Entry point: parse CLI options, then build the executable.
    create_exe(parse_options())

if __name__ == "__main__":
    main()
| gpl-2.0 |
nihilifer/txsocksx | txsocksx/grammar.py | 3 | 2468 | # Copyright (c) Aaron Gallagher <_@habnab.it>
# See COPYING for details.
import socket
# OMeta grammar describing the SOCKS4/4a/5 wire formats and the client/
# server protocol state machines; compiled at import time by txsocksx.
grammarSource = r"""
byte = anything:b -> ord(b)
short = byte:high byte:low -> (high << 8) | low
cstring = <(~'\x00' anything)*>:string '\x00' -> string
ipv4Address = <anything{4}>:packed -> socket.inet_ntop(socket.AF_INET, packed)
ipv6Address = <anything{16}>:packed -> socket.inet_ntop(socket.AF_INET6, packed)
SOCKS4Command = ( '\x01' -> 'tcp-connect'
| '\x02' -> 'tcp-bind'
)
SOCKS4HostUser = ipv4Address:host cstring:user -> (host, user)
SOCKS4aHostUser = ( '\x00'{3} ~'\x00' anything cstring:user cstring:host -> (host, user)
| SOCKS4HostUser
)
SOCKS4Request = '\x04' SOCKS4Command:command short:port SOCKS4aHostUser:hostuser -> (command, port) + hostuser
SOCKS4Response = '\x00' byte:status short:port ipv4Address:address -> (status, address, port)
SOCKS4ServerState_initial = SOCKS4Request:request -> receiver.clientRequest(*request)
SOCKS4ClientState_initial = SOCKS4Response:response -> receiver.serverResponse(*response)
SOCKS5Command = (SOCKS4Command | '\x03' -> 'udp-associate')
SOCKS5Hostname = byte:length <anything{length}>:host -> host
SOCKS5Address = ( '\x01' ipv4Address:address -> address
| '\x03' SOCKS5Hostname:host -> host
| '\x04' ipv6Address:address -> address
)
SOCKS5ServerAuthSelection = '\x05' anything
SOCKS5ServerLoginResponse = anything anything:status -> status == '\x00'
SOCKS5ServerResponse = '\x05' byte:status '\x00' SOCKS5Address:address short:port -> (status, address, port)
SOCKS5ClientGreeting = '\x05' byte:authMethodCount byte{authMethodCount}:authMethods -> authMethods or []
SOCKS5ClientRequest = '\x05' SOCKS5Command:command '\x00' SOCKS5Address:address short:port -> (command, address, port)
SOCKS5ServerState_initial = SOCKS5ClientGreeting:authMethods -> receiver.authRequested(authMethods)
SOCKS5ServerState_readRequest = SOCKS5ClientRequest:request -> receiver.clientRequest(*request)
SOCKS5ClientState_initial = SOCKS5ServerAuthSelection:selection -> receiver.authSelected(selection)
SOCKS5ClientState_readLoginResponse = SOCKS5ServerLoginResponse:response -> receiver.loginResponse(response)
SOCKS5ClientState_readResponse = SOCKS5ServerResponse:response -> receiver.serverResponse(*response)
SOCKSState_readData = anything:data -> receiver.dataReceived(data)
"""
# Names the grammar's semantic actions may reference at runtime.
bindings = {'socket': socket}
| isc |
cnxsoft/telechips-linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# Constants for decoding the futex(2) syscall's operation argument.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the flag bits, leaving the base futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

# Nanoseconds per second, for timestamp conversions below.
NSECS_PER_SEC = 1000000000
def avg(total, n):
    # Arithmetic mean of a running total over n samples.  Under
    # Python 2 this is floor division when both arguments are ints.
    return total / n

def nsecs(secs, nsecs):
    # Combine a (seconds, nanoseconds) pair into one nanosecond count.
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    # Whole-seconds part of a nanosecond timestamp.
    return nsecs / NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    # Sub-second remainder of a nanosecond timestamp.
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond timestamp as 'seconds.nanoseconds'.

    The previous version had a stray trailing comma after the %-format
    expression, which made it return a 1-tuple instead of the string
    (and it bound the result to a local named ``str``, shadowing the
    builtin).
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold ``value`` into the (min, max, avg, count) tuple at dict[key].

    Creates the entry on first use.  Replaces the Python-2-only
    ``dict.has_key`` with ``in`` (valid on both 2 and 3).
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        # Maintain a true running mean.  The previous code computed
        # (avg + value) / 2, which overweights recent samples and is
        # not the average of all observed values.
        count += 1
        mean = mean + (value - mean) / count
        dict[key] = (lo, hi, mean, count)
def clear_term():
    # Clear the terminal via ANSI escapes: cursor home (ESC[H) followed
    # by erase-display (ESC[2J).
    print("\x1b[H\x1b[2J")
# The audit bindings are optional; when missing we warn once and fall
# back to numeric syscall ids in syscall_name().
audit_package_warned = False

try:
    import audit
    # Map uname machine names to audit's architecture constants so
    # syscall numbers can be resolved for the running machine.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        # Older audit bindings may not define MACH_ARMEB.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Map a syscall number to its name, falling back to the number.

    Requires the optional audit-libs-python package; when it is
    unavailable (or the id is unknown) the numeric id is returned as a
    string.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # Narrowed from a bare 'except:': a bare except also swallows
        # KeyboardInterrupt/SystemExit.  NameError (audit missing) and
        # lookup failures still take this fallback.
        return str(id)
def strerror(nr):
    """Return the symbolic errno name (e.g. 'ENOENT') for ``nr``.

    Accepts negative error returns (as seen in raw syscall results) by
    taking the absolute value; unknown numbers yield a placeholder.
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare 'except:': only a missing code is expected.
        return "Unknown %d errno" % nr
| gpl-2.0 |
arowser/wireshark-xcoin | tools/WiresharkXML.py | 52 | 8992 | """
Routines for reading PDML produced from TShark.
Copyright (c) 2003, 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import sys
import xml.sax
from xml.sax.saxutils import quoteattr
import cStringIO as StringIO
class CaptureFile:
    """Placeholder for a parsed capture file; defines no behavior yet."""
    pass
class FoundItException(Exception):
    """Raised internally to abort a recursive search once a match is found."""
    pass

class PacketList:
    """A list-like container of packet items that supports searching the
    whole item tree by field name."""

    def __init__(self, children=None):
        self.children = [] if children is None else children

    def __getitem__(self, index):
        """Index into the child items, list-style."""
        return self.children[index]

    def __len__(self):
        return len(self.children)

    def item_exists(self, name):
        """Return True if an item named 'name' occurs anywhere in the
        tree, False otherwise."""
        if any(kid.name == name for kid in self.children):
            return True
        try:
            for kid in self.children:
                kid._item_exists(name)
        except FoundItException:
            return True
        return False

    def _item_exists(self, name):
        # Depth-first walk that raises FoundItException on the first
        # match, short-circuiting the rest of the search.
        for kid in self.children:
            if kid.name == name:
                raise FoundItException
            kid._item_exists(name)

    def get_items(self, name, items=None):
        """Collect every item matching 'name', in depth-first order."""
        at_top = items is None
        if at_top:
            items = []
        for kid in self.children:
            if kid.name == name:
                items.append(kid)
            kid.get_items(name, items)
        if at_top:
            return PacketList(items)

    def get_items_before(self, name, before_item, items=None):
        """Collect every item matching 'name' that occurs before the
        'before_item' object, in depth-first order.

        Only the top level of the search honours 'before_item'; this
        lets callers find fields from protocols that occur before other
        protocols (e.g. all tcp.dstport fields preceding a given HTTP
        protocol item when protocols are tunneled)."""
        at_top = items is None
        if at_top:
            items = []
        for kid in self.children:
            if at_top and kid == before_item:
                break
            if kid.name == name:
                items.append(kid)
            # Recurse with get_items: 'before_item' applies only to the
            # top level of the search.
            kid.get_items(name, items)
        if at_top:
            return PacketList(items)
class ProtoTreeItem(PacketList):
    """One node of the PDML protocol tree.

    Wraps the XML attributes shared by <proto> and <field> elements and,
    via PacketList, holds any child nodes.
    """
    def __init__(self, xmlattrs):
        PacketList.__init__(self)
        # All PDML attributes are optional; default each to "".
        self.name = xmlattrs.get("name", "")
        self.showname = xmlattrs.get("showname", "")
        self.pos = xmlattrs.get("pos", "")
        self.size = xmlattrs.get("size", "")
        self.value = xmlattrs.get("value", "")
        self.show = xmlattrs.get("show", "")
        self.hide = xmlattrs.get("hide", "")

    def add_child(self, child):
        self.children.append(child)

    def get_name(self):
        return self.name

    def get_showname(self):
        return self.showname

    def get_pos(self):
        return self.pos

    def get_size(self):
        return self.size

    def get_value(self):
        return self.value

    def get_show(self):
        return self.show

    def get_hide(self):
        return self.hide

    def dump(self, fh=sys.stdout):
        # Python 2 print-with-trailing-comma: attributes are written on one
        # line, space-separated, with no trailing newline, so subclasses can
        # continue the element on the same line.
        if self.name:
            print >> fh, " name=%s" % (quoteattr(self.name),),

        if self.showname:
            print >> fh, "showname=%s" % (quoteattr(self.showname),),

        if self.pos:
            print >> fh, "pos=%s" % (quoteattr(self.pos),),

        if self.size:
            print >> fh, "size=%s" % (quoteattr(self.size),),

        if self.value:
            print >> fh, "value=%s" % (quoteattr(self.value),),

        if self.show:
            print >> fh, "show=%s" % (quoteattr(self.show),),

        if self.hide:
            print >> fh, "hide=%s" % (quoteattr(self.hide),),
class Packet(ProtoTreeItem, PacketList):
    """A single <packet> element: the root of one packet's protocol tree."""
    def dump(self, fh=sys.stdout, indent=0):
        print >> fh, " " * indent, "<packet>"
        indent += 1
        for child in self.children:
            child.dump(fh, indent)
        print >> fh, " " * indent, "</packet>"
class Protocol(ProtoTreeItem):
    """A <proto> element: one protocol layer within a packet."""
    def dump(self, fh=sys.stdout, indent=0):
        # ProtoTreeItem.dump emits the attributes on the same open line.
        print >> fh, "%s<proto " % (" " * indent,),

        ProtoTreeItem.dump(self, fh)

        print >> fh, '>'

        indent += 1
        for child in self.children:
            child.dump(fh, indent)

        print >> fh, " " * indent, "</proto>"
class Field(ProtoTreeItem):
    """A <field> element: a single decoded field, possibly with sub-fields."""
    def dump(self, fh=sys.stdout, indent=0):
        # ProtoTreeItem.dump emits the attributes on the same open line.
        print >> fh, "%s<field " % (" " * indent,),

        ProtoTreeItem.dump(self, fh)

        if self.children:
            print >> fh, ">"
            indent += 1
            for child in self.children:
                child.dump(fh, indent)
            print >> fh, " " * indent, "</field>"

        else:
            # No children: emit a self-closing element.
            print >> fh, "/>"
class ParseXML(xml.sax.handler.ContentHandler):
    """SAX content handler that rebuilds Packet trees from PDML input.

    Only one Packet is kept in memory at a time; each completed packet is
    handed to the user-supplied callback and then dropped.
    """

    ELEMENT_FILE = "pdml"
    ELEMENT_FRAME = "packet"
    ELEMENT_PROTOCOL = "proto"
    ELEMENT_FIELD = "field"

    def __init__(self, cb):
        self.cb = cb
        self.chars = ""
        self.element_stack = []

    def startElement(self, name, xmlattrs):
        self.chars = ""

        if name == self.ELEMENT_FIELD:
            node = Field(xmlattrs)
        elif name == self.ELEMENT_PROTOCOL:
            node = Protocol(xmlattrs)
        elif name == self.ELEMENT_FRAME:
            node = Packet(xmlattrs)
        elif name == self.ELEMENT_FILE:
            # A check of the PDML version number could eventually go here.
            node = CaptureFile()
        else:
            sys.exit("Unknown element: %s" % (name,))

        self.element_stack.append(node)

    def endElement(self, name):
        node = self.element_stack.pop()

        # Attach the finished node to its parent unless the parent is the
        # lone CaptureFile at the bottom of the stack.  Packets are *not*
        # accumulated on the CaptureFile so that only one Packet element
        # lives in memory at any time.
        if len(self.element_stack) > 1:
            self.element_stack[-1].add_child(node)

        self.chars = ""

        # A completed Packet goes straight to the user's callback.
        if isinstance(node, Packet):
            self.cb(node)

    def characters(self, chars):
        self.chars = self.chars + chars
def _create_parser(cb):
    """Internal function for setting up the SAX parser."""
    sax_parser = xml.sax.make_parser()
    # Route all events through our handler, which invokes cb per packet.
    sax_parser.setContentHandler(ParseXML(cb))
    # Don't fetch the DTD, in case one is listed in the document.
    sax_parser.setFeature(xml.sax.handler.feature_external_ges, False)
    return sax_parser
def parse_fh(fh, cb):
    """Parse a PDML file, given filehandle, and call the callback function
    (cb) once for each Packet object."""
    pdml_parser = _create_parser(cb)
    pdml_parser.parse(fh)
    # parser.close() used to raise here for unknown reasons, so it is
    # deliberately not called.
def parse_string(text, cb):
    """Parse the PDML contained in a string."""
    parse_fh(StringIO.StringIO(text), cb)
def _test():
    """Smoke test: parse the PDML file named on the command line."""
    import sys

    def null_cb(packet):
        # Discard each packet; we only care that parsing succeeds.
        pass

    input_fh = open(sys.argv[1], "r")
    parse_fh(input_fh, null_cb)
if __name__ == '__main__':
    # Allow running this module directly for a quick parse check.
    _test()
| gpl-2.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
    """Composite prober that fans input out to a group of child probers.

    Tracks how many children are still active and elects the child with the
    highest confidence as the best guess.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mActiveNum = 0
        self._mProbers = []
        self._mBestGuessProber = None

    def reset(self):
        CharSetProber.reset(self)
        self._mActiveNum = 0
        for child in self._mProbers:
            if not child:
                continue
            child.reset()
            child.active = True
            self._mActiveNum += 1
        self._mBestGuessProber = None

    def get_charset_name(self):
        # get_confidence() has the side effect of electing a best prober.
        if not self._mBestGuessProber:
            self.get_confidence()
            if not self._mBestGuessProber:
                return None
        return self._mBestGuessProber.get_charset_name()

    def feed(self, aBuf):
        for child in self._mProbers:
            if not child:
                continue
            if not child.active:
                continue
            status = child.feed(aBuf)
            if not status:
                continue
            if status == constants.eFoundIt:
                # A child is certain: adopt it and stop immediately.
                self._mBestGuessProber = child
                return self.get_state()
            if status == constants.eNotMe:
                # Deactivate the child; give up entirely when none remain.
                child.active = False
                self._mActiveNum -= 1
                if self._mActiveNum <= 0:
                    self._mState = constants.eNotMe
                    return self.get_state()
        return self.get_state()

    def get_confidence(self):
        state = self.get_state()
        if state == constants.eFoundIt:
            return 0.99
        if state == constants.eNotMe:
            return 0.01

        # Otherwise, scan the active children for the highest confidence.
        best_conf = 0.0
        self._mBestGuessProber = None
        for child in self._mProbers:
            if not child:
                continue
            if not child.active:
                if constants._debug:
                    sys.stderr.write(child.get_charset_name()
                                     + ' not active\n')
                continue
            conf = child.get_confidence()
            if constants._debug:
                sys.stderr.write('%s confidence = %s\n' %
                                 (child.get_charset_name(), conf))
            if best_conf < conf:
                best_conf = conf
                self._mBestGuessProber = child
        if not self._mBestGuessProber:
            return 0.0
        return best_conf
| bsd-2-clause |
shadow000902/blog_source | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py | 578 | 116086 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of target types that are treated as linkable.
linkable_types = [
  'executable',
  'shared_library',
  'loadable_module',
  'mac_kernel_extension',
]

# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']

# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys; the two lists are merged
# into path_sections, but you should call IsPathSection instead of using
# either list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Filled in by SetGeneratorGlobals with base_path_sections plus
# generator-provided additions.
path_sections = set()

# These per-process dictionaries are used to cache build file data when
# loading in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
  """Return True if |section| is one that holds pathnames."""
  # Strip any trailing merge-operator characters ('=+?!'); they alter how a
  # section merges, not whether it contains paths.  '/' is notably absent
  # because a regular expression can't be treated as a path.
  while section and section[-1:] in '=+?!':
    section = section[:-1]

  if section in path_sections:
    return True

  # Names matching '_(dir|file|path)s?$' also count as path sections.
  # Manual string slicing instead of a regexp purely for speed: this runs
  # hundreds of thousands of times, and micro performance matters here.
  if '_' not in section:
    return False
  tail = section[-6:]
  if tail[-1] == 's':
    tail = tail[:-1]
  if tail[-5:] in ('_file', '_path'):
    return True
  return tail[-4:] == '_dir'
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations.  It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',

  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Populated by SetGeneratorGlobals from base_non_configuration_keys plus
# generator-provided additions.
non_configuration_keys = []

# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]

# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False

# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """
  # 'is None' rather than '== None': identity test is the correct idiom and
  # avoids invoking a custom __eq__.
  if included is None:
    included = []

  if build_file_path in included:
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.

  Note that this is slower than eval() is.
  """
  # Parse with the (Python 2) 'compiler' module so the AST can be validated
  # before anything is evaluated.
  ast = compiler.parse(file_contents)
  # A module whose body is a single expression statement (the dict literal).
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  # CheckNode enforces the dict/list/const-only restriction recursively.
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Recursively convert a validated 'compiler' AST node to plain data.

  Only Dict, List and Const nodes are accepted; anything else raises
  TypeError.  Duplicate dictionary keys raise GypError.  keypath is the
  chain of keys/indices from the root, used only for error messages.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    dict = {}  # NOTE(review): shadows the builtin name 'dict'; kept as-is.
    # Children alternate key, value, key, value, ...
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in dict:
        raise GypError("Key '" + key + "' repeated at level " +
                       repr(len(keypath) + 1) + " with key path '" +
                       '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      dict[key] = CheckNode(c[n + 1], kp)
    return dict
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
                     is_target, check):
  """Load a single .gyp/.gypi file, merge its includes, and cache it.

  Returns the (possibly cached) dict for build_file_path.  'check' selects
  CheckedEval validation over a plain eval; 'is_target' controls whether
  the caller-supplied 'includes' are merged in as well.
  """
  # Cache hit: the file was already loaded by an earlier call.
  if build_file_path in data:
    return data[build_file_path]

  if os.path.exists(build_file_path):
    # Open the build file for read ('r') with universal-newlines mode ('U')
    # to make sure platform specific newlines ('\r\n' or '\r') are converted
    # to '\n', which otherwise will fail eval().
    build_file_contents = open(build_file_path, 'rU').read()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))

  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      # Empty __builtins__ keeps gyp files from calling arbitrary functions.
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    # Annotate the error with the file it came from before re-raising.
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise

  if type(build_file_data) is not dict:
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)

  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}

  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      if is_target:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, includes, check)
      else:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, None, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' +
                                 build_file_path)
      raise

  return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merge all include files into |subdict|, recursing into nested dicts.

  Includes come from the caller-supplied |includes| list and from
  subdict['includes'] (which is consumed).  Each included file is loaded
  via LoadOneBuildFile and merged with MergeDicts; aux_data records which
  files were included for later use by GetIncludedBuildFiles.
  """
  includes_list = []
  # 'is not None' rather than '!= None': the idiomatic identity test.
  if includes is not None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path),
                                        include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    if 'included' not in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)

    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)

    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)

  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  """Walk |sublist| looking for nested dicts (and lists) to process includes in."""
  # Lists cannot carry 'includes' themselves; only their dict entries can.
  for entry in sublist:
    if type(entry) is dict:
      LoadBuildFileIncludesIntoDict(entry, sublist_path, data, aux_data,
                                    None, check)
    elif type(entry) is list:
      LoadBuildFileIncludesIntoList(entry, sublist_path, data, aux_data,
                                    check)
# Processes toolsets in all the targets.  This recurses into condition
# entries since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expand each target's 'toolsets' list into one target copy per toolset."""
  if 'targets' in data:
    expanded = []
    for target in data['targets']:
      # A target that already carries an explicit 'toolset' (and no pending
      # 'toolsets' list) has been processed before; keep it untouched.
      if 'toolset' in target and 'toolsets' not in target:
        expanded.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Only toolsets beyond the first require a deep copy; the original
        # dict is reused for toolsets[0].
        for extra_toolset in toolsets[1:]:
          clone = gyp.simple_copy.deepcopy(target)
          clone['toolset'] = extra_toolset
          expanded.append(clone)
        target['toolset'] = toolsets[0]
        expanded.append(target)
    data['targets'] = expanded
  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name.  It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load a target-bearing build file, expand it, and resolve dependencies.

  Returns False if the file was already loaded; otherwise, when
  load_dependencies is False, returns (build_file_path, dependencies) so a
  parallel driver can schedule the dependencies itself.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')

  # The 'target_build_files' key is only set when loading target build files
  # in the non-parallel code path, where LoadTargetBuildFile is called
  # recursively.  In the parallel code path, we don't need to check whether
  # the |build_file_path| has already been loaded, because the 'scheduled'
  # set in ParallelState guarantees that we never load the same
  # |build_file_path| twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)

  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)

  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)

  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth

  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')

  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)

  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)

  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)

  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)

  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)

    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults.  Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
          build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1

    # No longer needed.
    del build_file_data['target_defaults']

  # Look for dependencies.  This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.

  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])

  if load_dependencies:
    # Recursive (non-parallel) path: load each dependency in turn.
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
            e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    # Parallel path: hand the dependency list back to the scheduler.
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

  This wrapper is used when LoadTargetBuildFile is executed in
  a worker process.  Returns a picklable tuple (or None on error) that is
  sent back to the main process.
  """

  try:
    # Let the parent process handle Ctrl-C; workers ignore it.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value

    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      # File was already loaded; nothing to report.
      return result

    (build_file_path, dependencies) = result

    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to
    # keep it in the cache.
    build_file_data = per_process_data.pop(build_file_path)

    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  # Raised when the parallel build-file loading machinery hits an error.
  pass
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # Condition variable protecting this object and waking the main loop
    # whenever there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel.
    self.data = None
    # Number of parallel calls outstanding; decremented per response.
    self.pending = 0
    # Every build file scheduled so far, so none is scheduled twice.
    self.scheduled = set()
    # Dependency build file paths discovered but not yet scheduled.
    self.dependencies = []
    # Set when a child process reports an error.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.
    """
    self.condition.acquire()
    if not result:
      # The child failed; flag it and wake the main loop so it can bail.
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (path, file_data, file_deps) = result
    self.data[path] = file_data
    self.data['target_build_files'].add(path)
    # Queue any dependencies we haven't seen before.
    for dep in file_deps:
      if dep not in self.scheduled:
        self.scheduled.add(dep)
        self.dependencies.append(dep)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Load all target build files using a multiprocessing worker pool.

  Drives CallLoadTargetBuildFile in worker processes, scheduling newly
  discovered dependencies until none remain.  Exits the process on error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    # Loop until every scheduled file has been processed and no work remains.
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to schedule right now; wait for a worker to finish.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      # Globals the worker must mirror to behave like this process.
      global_flags = {
          'path_sections': globals()['path_sections'],
          'non_configuration_keys': globals()['non_configuration_keys'],
          'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e

  parallel_state.condition.release()

  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
# Maps each closing bracket to its opening counterpart.
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  """Locate the bracket group opened by the first bracket in input_str.

  Returns (start, end) where end is one past the matching close bracket,
  or (-1, -1) when the brackets are unbalanced or mismatched.
  """
  open_stack = []
  group_start = -1
  for pos, ch in enumerate(input_str):
    if ch in LBRACKETS:
      open_stack.append(ch)
      if group_start == -1:
        group_start = pos
    elif ch in BRACKETS:
      # A closer with no opener, or the wrong kind of opener, is an error.
      if not open_stack:
        return (-1, -1)
      if open_stack.pop() != BRACKETS[ch]:
        return (-1, -1)
      # Stack empty again: the first group has just been closed.
      if not open_stack:
        return (group_start, pos + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  # Hand-rolled instead of a regexp for speed: this is called a lot, and
  # regexps would roughly double the time taken.
  if type(string) is not str or not string:
    return False
  if string == "0":
    return True
  digits = string[1:] if string[0] == "-" else string
  if not digits:
    return False
  # Canonical integers other than "0" never have a leading zero.
  if "1" <= digits[0] <= "9":
    return digits.isdigit()
  return False
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Rewrite a Unix-style command for the host platform.

  On Windows, a leading 'cat ' is translated to 'type '; on other platforms
  the command is returned untouched.  Accepts either a command string or an
  argv-style list.
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Variable-expansion phases: '<' forms expand early, '>' forms late, and
# '^' forms late-late (after all other processing).
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
  """Expands GYP variable and command references in |input| for |phase|.

  |input| may be any type; it is converted with str() before matching.
  |variables| maps variable names to their values, and |build_file| is the
  path of the build file being processed, used to resolve relative paths and
  for error messages.  Returns the expanded value: an int when the result is
  a canonically-represented integer string, a list for '@' (list-context)
  expansions, and a string otherwise.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False
  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)
  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str
  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str
  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.
    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']
    # file_list is true if a | variant is used.
    file_list = '|' in match['type']
    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')
    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end
    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]
    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]
    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)
    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()
    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context. Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement
    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string. Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None
      # Support <|(listfile.txt ...) which generates a file
      # containing items from a gyp list, generated at gyp time.
      # This works around actions/rules which have more inputs than will
      # fit on the command line.
      if file_list:
        if type(contents) is list:
          contents_list = contents
        else:
          contents_list = contents.split(' ')
        replacement = contents_list[0]
        if os.path.isabs(replacement):
          raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
        if not generator_filelist_paths:
          path = os.path.join(build_file_dir, replacement)
        else:
          if os.path.isabs(build_file_dir):
            toplevel = generator_filelist_paths['toplevel']
            rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
          else:
            rel_build_file_dir = build_file_dir
          qualified_out_dir = generator_filelist_paths['qualified_out_dir']
          path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
          gyp.common.EnsureDirExists(path)
        replacement = gyp.common.RelativePath(path, build_file_dir)
        f = gyp.common.WriteOnDiff(path)
        for i in contents_list[1:]:
          f.write('%s\n' % i)
        f.close()
      elif run_command:
        use_shell = True
        if match['is_array']:
          # NOTE(review): eval of build-file content; gyp input files are
          # treated as trusted here.
          contents = eval(contents)
          use_shell = False
        # Check for a cached value to avoid executing commands, or generating
        # file lists more than once. The cache key contains the command to be
        # run as well as the directory to run it from, to account for commands
        # that depend on their current directory.
        # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
        # someone could author a set of GYP files where each time the command
        # is invoked it produces different output by design. When the need
        # arises, the syntax should be extended to support no caching off a
        # command's output so it is run every time.
        cache_key = (str(contents), build_file_dir)
        cached_value = cached_command_results.get(cache_key, None)
        if cached_value is None:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Executing command '%s' in directory '%s'",
                          contents, build_file_dir)
          replacement = ''
          if command_string == 'pymod_do_main':
            # <!pymod_do_main(modulename param eters) loads |modulename| as a
            # python module and then calls that module's DoMain() function,
            # passing ["param", "eters"] as a single list argument. For modules
            # that don't load quickly, this can be faster than
            # <!(python modulename param eters). Do this in |build_file_dir|.
            oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
            if build_file_dir:  # build_file_dir may be None (see above).
              os.chdir(build_file_dir)
            try:
              parsed_contents = shlex.split(contents)
              try:
                py_module = __import__(parsed_contents[0])
              except ImportError as e:
                raise GypError("Error importing pymod_do_main"
                               "module (%s): %s" % (parsed_contents[0], e))
              replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
            finally:
              # Always restore the original working directory.
              os.chdir(oldwd)
            assert replacement != None
          elif command_string:
            raise GypError("Unknown command string '%s' in '%s'." %
                           (command_string, contents))
          else:
            # Fix up command with platform specific workarounds.
            contents = FixupPlatformCommand(contents)
            try:
              p = subprocess.Popen(contents, shell=use_shell,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   stdin=subprocess.PIPE,
                                   cwd=build_file_dir)
            except Exception, e:
              raise GypError("%s while executing command '%s' in %s" %
                             (e, contents, build_file))
            p_stdout, p_stderr = p.communicate('')
            if p.wait() != 0 or p_stderr:
              sys.stderr.write(p_stderr)
              # Simulate check_call behavior, since check_call only exists
              # in python 2.5 and later.
              raise GypError("Call to '%s' returned exit status %d while in %s." %
                             (contents, p.returncode, build_file))
            replacement = p_stdout.rstrip()
          cached_command_results[cache_key] = replacement
        else:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Had cache value for command '%s' in directory '%s'",
                          contents,build_file_dir)
          replacement = cached_value
    else:
      # Ordinary variable reference: look it up in |variables|.
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]
    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it. Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)
    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement
      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output
  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)
  # Convert all strings that are canonically-represented integers into integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)
  return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
# Maps condition-expression strings to their compiled code objects;
# populated and consumed by EvalSingleCondition.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used."""
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError. That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  result = None
  i = 0
  while i < len(condition):
    # Each step consumes (cond_expr, true_dict) and optionally a false_dict.
    cond_expr = condition[i]
    true_dict = condition[i + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
          conditions_key, cond_expr, type(true_dict)))
    false_dict = None
    i += 2
    if i < len(condition) and type(condition[i]) is dict:
      false_dict = condition[i]
      i += 1
      if i != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
            conditions_key, cond_expr, len(condition) - i))
    # Only the first pair that produces a dict wins; later pairs are still
    # validated above but not evaluated.
    if result is None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)
  return result
def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise."""
  # Do expansions on the condition itself.  Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
          'Variable expansion in this context permits str and int ' + \
          'only, found ' + cond_expr_expanded.__class__.__name__)
  try:
    # Compile each distinct expression only once and reuse the code object
    # (see cached_conditions_asts above).
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # NOTE(review): eval of build-file content; gyp input is treated as
    # trusted.  Builtins are masked so the expression sees only |variables|.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError, e:
    # Re-raise with the build file and offending expression in the message.
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError, e:
    # An undefined name in the expression; report where it happened.
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Processes a 'conditions' or 'target_conditions' section in the_dict.

  The section handled depends on phase:
    early -> conditions
    late -> target_conditions
    latelate -> no conditions

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true.  Optionally, a third item,
  false_dict, may be present.  false_dict is merged into the_dict if
  cond_expr evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to phase, immediately
  prior to being merged.
  """
  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    # No conditions sections are processed in the latelate phase.
    return
  else:
    assert False

  # Unhook the conditions list; it's consumed here and no longer needed.
  conditions_list = the_dict.pop(conditions_key, None)
  if conditions_list is None:
    return

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)
      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Loads automatic variables from |the_dict| into |variables|.

  Any key in the_dict whose value is a plain string, int, or list becomes an
  automatic variable.  The variable name is the key name with a "_" character
  prepended.
  """
  # items() is equivalent to the Python 2-only iteritems() for this loop and
  # keeps the helper portable to Python 3.
  for key, value in the_dict.items():
    if type(value) in (str, int, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads |the_dict|'s "variables" dict, if present, into |variables|.

  Any keys in the_dict's "variables" dict, if it has one, becomes a
  variable.  The variable name is the key name in the "variables" dict.
  Variables that end with the % character are set only if they are unset in
  the variables dict.  the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  for key, value in the_dict.get('variables', {}).items():
    # Only plain strings, ints, and lists become variables.
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # Bug fix: compare with == rather than the identity operator 'is',
      # which only happened to work because CPython interns short strings.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    # (items() is equivalent to the Python 2-only iteritems() here and in
    # the loops below, and keeps this function portable to Python 3.)
    for key, value in the_dict['variables'].items():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.items():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
            'Variable expansion in this context permits str and int ' + \
            'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within
  # a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.items():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Handles variable expansion and conditional evaluation in |the_list|.

  Strings are expanded in place; a string that expands to a list is spliced
  into the_list at its position.  Dicts and nested lists are processed
  recursively.  Raises ValueError or TypeError for unsupported item or
  expansion-result types.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        the_list[index:index+1] = expanded
        index += len(expanded)
        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # Bug fix: the int index must be converted with str(); concatenating
        # it directly raised a TypeError and masked the intended message.
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
    elif type(item) is not int:
      # Bug fix: str(index) here as well, for the same reason as above.
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  targets = {}
  for build_file in data['target_build_files']:
    for target in data[build_file].get('targets', []):
      qualified = gyp.common.QualifiedTarget(build_file,
                                             target['target_name'],
                                             target['toolset'])
      if qualified in targets:
        raise GypError('Duplicate target definitions for ' + qualified)
      targets[qualified] = target

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  # items()/range() are equivalent to the Python 2-only iteritems()/xrange()
  # here and keep this function portable to Python 3.
  for target, target_dict in targets.items():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index in range(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  # items() is equivalent to the Python 2-only iteritems() here and keeps
  # this function portable to Python 3.
  for target, target_dict in targets.items():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = set()
  unique = []
  for element in l:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's dependency
  lists."""
  # items() is equivalent to the Python 2-only iteritems() here and keeps
  # this function portable to Python 3.
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
  """Removes item from l."""
  # The canonical dict mirrors Unify: equal elements are represented by the
  # first-seen object, preserving the original's identity behavior.
  canonical = {}
  kept = []
  for element in l:
    if element != item:
      kept.append(canonical.setdefault(element, element))
  return kept
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the prune_self_dependency
  variable set."""
  # items() is equivalent to the Python 2-only iteritems() here and keeps
  # this function portable to Python 3.
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if t == target_name:
            # Only prune when the target explicitly opts in via its
            # 'prune_self_dependency' variable.
            if targets[t].get('variables', {}).get('prune_self_dependency', 0):
              target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the 'none'
  targets."""
  # items() is equivalent to the Python 2-only iteritems() here and keeps
  # this function portable to Python 3.
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if target_dict.get('type', None) == 'none':
            if targets[t].get('variables', {}).get('link_dependency', 0):
              target_dict[dependency_key] = \
                  Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
  """A node in the target dependency graph.

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  # Raised when a cycle is detected in the dependency graph.
  class CircularException(GypError):
    pass
  def __init__(self, ref):
    # The object this node represents; the graph's root node uses a ref of
    # None (see DirectDependencies).
    self.ref = ref
    # DependencyGraphNodes this node depends on.
    self.dependencies = []
    # DependencyGraphNodes that depend on this node.
    self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
  def FlattenToList(self):
    """Returns the refs of self's transitive dependents, topologically sorted.

    Every ref appears after all of its dependencies and before all of its
    dependents.  Nodes that can never reach in-degree zero (which happens
    when the graph contains a cycle) are omitted from the result.
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes. Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = OrderedSet()

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list. Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list. Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)

      # Look at dependents of the node just added to flat_list. Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list. Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns an OrderedSet of dependency targets that are linked
    into this target.
    This function has a split personality, depending on the setting of
    |initial|. Outside callers should always leave |initial| at its default
    setting.
    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.
    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    Args:
      targets: dict mapping target refs to their target dicts; every target
          reached must carry 'target_name' and 'type' fields.
      include_shared_libraries: see above.
      dependencies: OrderedSet accumulated across the recursive calls.
      initial: True only for the outermost call.
    Returns:
      The |dependencies| OrderedSet, updated in place.
    Raises:
      GypError: if a reached target lacks 'target_name' or 'type'.
    """
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()
    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies
    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.
    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")
    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])
    target_type = targets[self.ref]['type']
    # linkable_types is a module-level constant defined elsewhere in this file.
    is_linkable = target_type in linkable_types
    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies
    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      dependencies.add(self.ref)
      return dependencies
    # Executables, mac kernel extensions and loadable modules are already fully
    # and finally linked. Nothing else can be a link dependency of them, there
    # can only be dependencies in the sense that a dependent target might run
    # an executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module',
                                       'mac_kernel_extension'):
      return dependencies
    # Shared libraries are already fully linked. They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies
    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.add(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable. Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)
    return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the target dependency graph and topologically flattens it.
  Args:
    targets: dict mapping target refs to target dicts; each dict's
        'dependencies' list names other keys of |targets|.
  Returns:
    A two-item list [dependency_nodes, flat_list]: dependency_nodes maps each
    target ref to its DependencyGraphNode, and flat_list orders the refs so
    that every target follows all of its dependencies.
  Raises:
    GypError: if a listed dependency is not a known target.
    DependencyGraphNode.CircularException: if the graph contains a cycle.
  """
  # Create a DependencyGraphNode for each target. Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target, spec in targets.iteritems():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)
  # Set up the dependency links. Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    # NOTE(review): target_build_file is never used below — candidate for
    # removal (BuildFile may be assumed side-effect free; confirm).
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)
  flat_list = root_node.FlattenToList()
  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Verifies that the .gyp files containing |targets| do not depend on each
  other circularly.
  A .gyp file A depends on .gyp file B when a target defined in A lists a
  target defined in B as a dependency.  A file referring back to itself is
  allowed.
  Raises:
    GypError: if a dependency's .gyp file is unknown.
    DependencyGraphNode.CircularException: if a cycle between files exists.
  """
  # Create a DependencyGraphNode for each gyp file containing a target. Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)
  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      # 'as' form works on Python 2.6+ and 3, unlike 'except GypError, e'.
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise
      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        # Fixed spelling of "Dependency" in the error message.
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)
  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)
  flat_list = root_node.FlattenToList()
  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      file_node = dependency_nodes.values()[0]
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges each dependency's |key| settings dict into its dependents.
  |key| selects both the settings dict to merge and how the relevant set of
  dependencies is computed; it must be 'all_dependent_settings',
  'direct_dependent_settings', or 'link_settings'.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    node = dependency_nodes[target]
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)
    for dependency in dependencies:
      dependency_dict = targets[dependency]
      # Dependencies without a |key| section contribute nothing.
      if key not in dependency_dict:
        continue
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, gyp.common.BuildFile(dependency))
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Rewrites each target's 'dependencies' list for linking purposes.
  Static library targets lose dependencies on other non-hard static
  libraries (the original list is preserved under 'dependencies_original');
  linkable targets gain entries for every target that must be linked into
  them.  When |sort_dependencies| is set, each linkable target's list is
  reordered from dependents to dependencies.
  """
  # Recompute target "dependencies" properties. For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set. For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']
    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]
      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output. Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]
        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1
      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']
    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target. Add them to the dependencies list if they're not already
      # present.
      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to dependencies.
      # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Returns |item| rebased from |fro_file|'s directory to |to_file|'s.
  If item is a relative path, it's relative to the build file dict that it's
  coming from.  Fix it up to make it relative to the build file dict that
  it's going into.
  Exception: any |item| that begins with these special characters is
  returned without modification.
    /   Used when a path is already absolute (shortcut optimization;
        such paths would be returned as absolute anyway)
    $   Used for build environment variables
    -   Used for some build environment flags (such as -lapr-1 in a
        "libraries" section)
    <   Used for our own variable and command expansions (see ExpandVariables)
    >   Used for our own variable and command expansions (see ExpandVariables)
    ^   Used for our own variable and command expansions (see ExpandVariables)
    "/' Used when a value is quoted.  If these are present, then we
        check the second character instead.
  """
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
    # Use endswith() rather than indexing item[-1] so that an empty |item|
    # cannot raise IndexError.
    if item.endswith('/'):
      ret += '/'
    return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges list |fro| into list |to| in place.
  |to_file| and |fro_file| identify the build files the lists belong to; path
  items are rebased between them when |is_paths| is True.  Items are appended
  when |append| is True and prepended (preserving |fro|'s order) otherwise.
  "Singleton" items — everything except strings beginning with '-' — appear
  at most once in the merged list.  Nested dicts and lists are deep-copied
  via MergeDicts/MergeLists; any other item type raises TypeError.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__
  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l
  # Insertion point for prepends; advances so |fro|'s order is preserved.
  prepend_index = 0
  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item
      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list. If the list contains any
      # descendant dicts, path fixing will occur. Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|. append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)
    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend. This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)
      # Don't just insert everything at index 0. That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges dict |fro| into dict |to| in place, deep-copying nested values.
  Scalars overwrite; dicts merge recursively; lists merge according to the
  policy suffix on the from-key ('=', '+', '?', or none — see the comment in
  the list branch below).  Path-type values (per IsPathSection) are rebased
  from |fro_file| to |to_file|.
  Raises:
    TypeError: when value types are incompatible or unsupported.
    GypError: when conflicting list policies appear together in |fro|.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics. Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True
      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any. Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has ...the to-list will have this action
      # this character appended:... applied when receiving the from-list:
      #                          = replace
      #                          + prepend
      #                          ? set, only if to-list does not yet exist
      #                      (none) append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']
      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example. Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)
      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []
      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Merges |configuration| (and, first, everything it inherits from) out of
  |target_dict|['configurations'] into |new_configuration_dict|.
  |visited| lists configurations already merged on this path, guarding
  against repeated work and inheritance cycles.
  """
  # Skip configurations that were already visited on this inheritance path.
  if configuration in visited:
    return
  config = target_dict['configurations'][configuration]
  # Fold in ancestors first so that the current configuration's own settings
  # win on conflicts.
  for ancestor in config.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, ancestor,
                               visited + [configuration])
  # Merge this configuration into the result.
  MergeDicts(new_configuration_dict, config, build_file, build_file)
  # Drop abstract.
  if 'abstract' in new_configuration_dict:
    del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
  """Expands target_dict['configurations'] so that every concrete
  configuration carries a full copy of the target-level settings.
  Each non-abstract configuration inherits from the enclosing target scope
  and from its 'inherit_from' parents; abstract configurations are then
  dropped, and every key that was copied into the configurations is removed
  from the target scope.  A 'Default' configuration and a
  'default_configuration' value are synthesized when absent.
  Raises:
    GypError: if a configuration contains a key listed in
        invalid_configuration_keys.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /). Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']
  build_file = gyp.common.BuildFile(target)
  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    # NOTE(review): raises IndexError when every configuration is abstract.
    target_dict['default_configuration'] = sorted(concrete)[0]
  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])
    merged_configurations[configuration] = new_configuration_dict
  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])
  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]
  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]
  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.
  An exclusion list is in a dict key named with a trailing "!", like
  "sources!". Every item in such a list is removed from the associated
  main list, which in this example, would be "sources". Removed items are
  placed into a "sources_excluded" list in the dict.
  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list. Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc. The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list. Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!"). Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter. Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  Arguments:
    name: name of |the_dict|, used only in error messages.
    the_dict: the dict to process; modified in place, and recursed into.
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists. Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|. This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue
    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)
    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list. Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue
    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])
    if not list_key in lists:
      lists.append(list_key)
  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]
  for list_key in lists:
    the_list = the_dict[list_key]
    # Initialize the list_actions list, which is parallel to the_list. Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied. Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1. Includes and
    # excludes override previous actions. All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))
    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0
      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]
    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)
        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)
        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value
      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]
    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded"). The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)
    excluded_list = []
    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift. That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude). Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]
    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list
  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recursively applies list filters to dicts found inside |the_list|.

  Lists themselves carry no filter keys, so this simply walks nested
  containers until it reaches dicts and hands them to
  ProcessListFiltersInDict.
  """
  for entry in the_list:
    entry_type = type(entry)
    if entry_type is dict:
      ProcessListFiltersInDict(name, entry)
    elif entry_type is list:
      ProcessListFiltersInList(name, entry)
def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises an exception on error.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'mac_kernel_extension', 'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(VALID_TARGET_TYPES)))
  # standalone_static_library is meaningless on anything but a static_library.
  if (target_type != 'static_library' and
      target_dict.get('standalone_static_library', 0)):
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check):
  """Rejects static_library targets whose compiled sources share a basename.

  Mac libtool cannot archive two objects with the same basename; the check
  can be disabled with --no-duplicate-basename-check.
  """
  if not duplicate_basename_check:
    return
  if target_dict.get('type', None) != 'static_library':
    return
  # Only files that actually get compiled to objects can collide.
  compiled_exts = ('.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S')
  basenames = {}
  for source in target_dict.get('sources', []):
    root, ext = os.path.splitext(source)
    if ext not in compiled_exts:
      continue
    # Group by basename without the extension.
    basenames.setdefault(os.path.basename(root), []).append(source)
  error = ''
  for basename, files in basenames.iteritems():
    if len(files) > 1:
      error += '  %s: %s\n' % (basename, ' '.join(files))
  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'libtool on Mac cannot handle that. Use '
          '--no-duplicate-basename-check to disable this validation.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  def _strip_dot(extension):
    # Extensions may be written with or without the leading dot.
    if extension.startswith('.'):
      return extension[1:]
    return extension

  # Map rule names and (dot-less) extensions back to their rule dicts so a
  # duplicate can be reported along with the rule it conflicts with.
  rule_names = {}
  rule_extensions = {}
  source_keys = ['sources'] + list(extra_sources_for_rules)
  for rule in target_dict.get('rules', []):
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule

    rule_extension = _strip_dot(rule['extension'])
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule

    # 'rule_sources' is an output of this function; refuse input that
    # already carries it.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))

    # Collect every source whose extension matches this rule.
    rule_sources = [source
                    for source_key in source_keys
                    for source in target_dict.get(source_key, [])
                    if _strip_dot(os.path.splitext(source)[1]) ==
                    rule_extension]
    if rule_sources:
      rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Validates the optional 'run_as' section of |target_dict|.

  'run_as' must be a dict containing a list-valued 'action'; an optional
  'working_directory' must be a string and an optional 'environment' must
  be a dict.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    return

  def _bad(message):
    # Every run_as complaint names the target and its build file.
    raise GypError(message % (target_name, build_file))

  if type(run_as) is not dict:
    _bad("The 'run_as' in target %s from file %s should be a "
         "dictionary.")
  action = run_as.get('action')
  if not action:
    _bad("The 'run_as' in target %s from file %s must have an "
         "'action' section.")
  if type(action) is not list:
    _bad("The 'action' for 'run_as' in target %s from file %s "
         "must be a list.")
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    _bad("The 'working_directory' for 'run_as' in target %s "
         "in file %s should be a string.")
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    _bad("The 'environment' for 'run_as' in target %s "
         "in file %s should be a dictionary.")
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Each action needs an 'action_name', an 'inputs' list (possibly empty),
  and — when present — a non-empty first element in its 'action' command.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    if not action.get('action_name'):
      raise GypError("Anonymous action in target %s. "
                     "An action must have an 'action_name' field." %
                     target_name)
    # 'inputs' may legitimately be an empty list; only its absence is an
    # error, so test against None rather than truthiness.
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    command = action.get('action')
    if command and not command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both int values and int keys are converted in place; nested dicts and
  lists are handled recursively.
  """
  # Snapshot the items before iterating: int keys are deleted and reinserted
  # as strings below, and mutating a dict while iterating a live .items()
  # view raises under Python 3.  (On Python 2, items() already returned a
  # list, so this is a no-op there.)
  for k, v in list(the_dict.items()):
    if type(v) is int:
      v = str(v)
      the_dict[k] = v
    elif type(v) is dict:
      TurnIntIntoStrInDict(v)
    elif type(v) is list:
      TurnIntIntoStrInList(v)

    if type(k) is int:
      del the_dict[k]
      the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.

  Conversion happens in place so the caller's list object is preserved.
  """
  # enumerate works on both Python 2 and 3 (the original xrange does not)
  # and avoids the manual index bookkeeping.
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  # Resolve each requested root to its fully-qualified target name(s).
  qualified_root_targets = []
  for root in root_targets:
    root = root.strip()
    found = gyp.common.FindQualifiedTargets(root, flat_list)
    if not found:
      raise GypError("Could not find target %s" % root)
    qualified_root_targets.extend(found)

  # Keep every root plus everything reachable from it.
  wanted_targets = {}
  for qualified in qualified_root_targets:
    wanted_targets[qualified] = targets[qualified]
    for dependency in dependency_nodes[qualified].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]

  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if 'targets' not in data[build_file]:
      continue
    data[build_file]['targets'] = [
        t for t in data[build_file]['targets']
        if gyp.common.QualifiedTarget(
            build_file, t['target_name'], t['toolset']) in wanted_targets]

  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.
  """
  # Maps 'subdirectory:target_name' to the .gyp file that first claimed it.
  used = {}
  for target in targets:
    # 'path/to/file.gyp:target_name' -> ('path/to/file.gyp', 'target_name').
    path, name = target.rsplit(':', 1)
    # 'path/to/file.gyp' -> ('path/to', 'file.gyp').  (Renamed from 'gyp' to
    # avoid shadowing the gyp module.)
    subdir, gyp_file = os.path.split(path)
    # Use '.' for the current directory '' so error messages read better.
    subdir = subdir or '.'
    key = subdir + ':' + name
    if key in used:
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, used[key]))
    used[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Seeds this module's generator-specific globals from
  |generator_input_info|.

  Must be called before build files are loaded so that path handling, key
  classification, and toolset support all match the active generator.
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])
  global non_configuration_keys
  # Copy the base list so the module default is never mutated.
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, duplicate_basename_check, parallel, root_targets):
  """Loads build files and post-processes targets into generator-ready form.

  Arguments:
    build_files: list of top-level .gyp file paths to load.
    variables, includes, depth: inputs to variable/include expansion.
    generator_input_info: dict of generator settings (see SetGeneratorGlobals).
    check: if true, syntax-check build files as they are loaded.
    circular_check: if true, reject cross-.gyp dependency cycles.
    duplicate_basename_check: forwarded to ValidateSourcesInTarget.
    parallel: if true, load build files concurrently
        (LoadTargetBuildFilesParallel).
    root_targets: optional target list; when set, everything that is not one
        of them or a deep dependency of one is pruned.

  Returns [flat_list, targets, data]: dependency-ordered qualified target
  names, the dict of target specs keyed by qualified name, and the raw
  per-build-file data dict.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:  # Python 2 syntax: this module predates Python 3.
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise
  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)
  # Fully qualify all dependency links.
  QualifyDependencies(targets)
  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)
  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)
  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)
  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    # Pull just the dependency-related keys out, filter them, and write the
    # survivors back, so other sections are untouched at this phase.
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]
  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)
  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)
  [dependency_nodes, flat_list] = BuildDependencyList(targets)
  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)
  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)
  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.  (Intentionally inside the
    # settings_type loop: each section is stripped right after publication.)
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]
  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])
  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)
  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)
  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)
  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)
  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)
  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)
  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| mit |
shiquanwang/numba | numba/tests/closures/test_closure.py | 1 | 8088 | import numba; from numba import *; from numba.error import NumbaError
from numba.testing.test_support import rewrite_doc
@autojit  # NOTE: the doctests in __doc__ below assert absolute line:col numbers from this file; do not insert or delete lines.
def error1():  # inner closure lacks @jit/@autojit -> NumbaError (see __doc__)
    def inner():
        pass
@autojit
def error2():  # dynamic (@autojit) closures are unsupported -> NumbaError
    @autojit
    def inner():
        pass
@autojit
def error3():  # calls the closure before its definition -> NumbaError
    inner(10, 20, 30)
    @jit(restype=void, argtypes=[int_, int_, int_])
    def inner(a, b, c):
        print(str(a) + ' ' + str(b) + ' ' + str(c))
@autojit
def error4():  # 4 args passed to a 3-arg closure -> NumbaError
    @jit(restype=void, argtypes=[int_, int_, int_])
    def inner(a, b, c):
        print(str(a) + ' ' + str(b) + ' ' + str(c))
    inner(10, a=20, b=30, c=40)
@autojit
def error5():  # positional 10 and keyword a=20 both bind 'a' -> NumbaError
    @jit(restype=void, argtypes=[int_, int_, int_])
    def inner(a, b, c):
        print(str(a) + ' ' + str(b) + ' ' + str(c))
    inner(10, a=20, b=30)
@autojit(warnstyle='simple')
def closure1():  # returns an int-typed closure; 'a' is deliberately unused (doctest expects the warning)
    a = 10
    @jit(restype=void, argtypes=[int_])
    def inner(arg):
        print(str(arg))
    return inner
@autojit
def closure2():  # calls the closure immediately with a keyword argument
    a = 10
    @jit(restype=void, argtypes=[int_])
    def inner(arg):
        print(str(arg))
    inner(arg=a)
@autojit
def closure3():
    a = 10
    @jit('void()')
    def inner():
        print(a)
    a = 12  # rebinding after the closure is created; the cell sees 12 (doctest)
    inner()
@autojit
def closure4():  # same as closure3 but returns the closure instead of calling it
    a = 10
    @jit('void()')
    def inner():
        print(a)
    a = 12
    return inner
@autojit
def nested_closure():  # c2 captures outer 'a' and c1's 'b'; c3 captures outer 'b'
    a = 20
    b = 21
    @jit('object_()')
    def c1():
        b = 10
        @jit('void()')
        def c2():
            print(str(a) + ' ' + str(b))
        return c2
    @jit('void()')
    def c3():
        print(b)
    return c1, c3
__doc__ = """
>>> error1()
Traceback (most recent call last):
    ...
NumbaError: 5:4: Closure must be decorated with 'jit' or 'autojit'
>>> error2()
Traceback (most recent call last):
    ...
NumbaError: 10:5: Dynamic closures not yet supported, use @jit
>>> error3()
Traceback (most recent call last):
    ...
NumbaError: 16:4: local variable 'inner' referenced before assignment
>>> error4()
Traceback (most recent call last):
    ...
NumbaError: 28:4: Expected 3 arguments, got 4
>>> error5()
Traceback (most recent call last):
    ...
NumbaError: Got multiple values for positional argument 'a'
Test closures
>>> closure1().__name__
Warning 40:4: Unused variable 'a'
'inner'
>>> closure1()()
Traceback (most recent call last):
    ...
TypeError: function takes exactly 1 argument (0 given)
>>> closure1()(object())
Traceback (most recent call last):
    ...
TypeError: an integer is required
>>> closure1()(10.0)
10
>>> closure2()
10
>>> closure3()
12
>>> func = closure4()
>>> print(func.__name__)
inner
>>> field, = func.__closure__._numba_attrs._fields_
>>> import ctypes
>>> print((field[0], field[1] == ctypes.c_int))
('a', True)
>>> print(func.__closure__._numba_attrs.a)
12
>>> func()
12
>>> c1, c3 = nested_closure()
>>> c1.__name__
'c1'
>>> c3.__name__
'c3'
>>> c1().__name__
'c2'
>>> c1()()
20 10
>>> c3()
21
"""
@autojit
def closure_arg(a):  # three nested closure levels, each capturing outer args and locals
    @jit('object_(object_)')
    def closure1(b):
        print(str(a) + ' ' + str(b))
        x = 10 + int_(b)
        @jit('object_(object_)')
        def closure2(c):
            print(str(a) + ' ' + str(b) + ' ' + str(c) + ' ' + str(x))
            y = double(x) + double(c)
            @jit('void(object_)')
            def closure3(d):
                print(str(a) + ' ' + str(b) + ' ' + str(c) + ' ' + str(d) + ' ' + str(x) + ' ' + str(y))
            return closure3
        return closure2
    return closure1
__doc__ += \
"""
>>> closure1 = closure_arg(1)
>>> closure1.__name__
'closure1'
>>> closure2_1 = closure1(2)
1 2
>>> closure2_1.__name__
'closure2'
>>> closure2_2 = closure1(3)
1 3
>>> closure2_2.__name__
'closure2'
>>> closure3_1 = closure2_1(4)
1 2 4 12
>>> closure3_1.__name__
'closure3'
>>> closure3_2 = closure2_2(5)
1 3 5 13
>>> closure3_2.__name__
'closure3'
>>> closure3_1(6)
1 2 4 6 12 16.0
>>> closure3_2(7)
1 3 5 7 13 18.0
"""
@autojit
def closure_arg_simple(a):  # two levels, argument capture only
    @jit('object_(object_)')
    def inner(b):
        print(str(a) + ' ' + str(b))
        @jit('void(object_)')
        def inner_inner(c):
            print(str(a) + ' ' + str(b) + ' ' + str(c))
        return inner_inner
    return inner
__doc__ += """
>>> closure_arg_simple(10)(20)(30)
10 20
10 20 30
"""
@autojit
def closure_skip_level(a):  # innermost closure reads 'a' across a level that doesn't use it
    @jit('object_()')
    def inner():
        @jit('void()')
        def inner_inner():
            print(str(a))
        return inner_inner
    return inner
__doc__ += """
>>> closure_skip_level(10)()()
10
"""
@autojit
def objects(s):  # closures can capture and call methods on object-typed values
    @jit('object_()')
    def inner():
        return s.upper()
    return inner
__doc__ += """
>>> objects("hello")()
'HELLO'
"""
@autojit
def wrong_signature(s):  # declared to take one argument but defined with none -> numba error (see doctest)
    @jit('object_(object_)')
    def inner():
        return s.upper()
    return inner
__doc__ += """
>>> try_(wrong_signature, "foo")
--------------------- Numba Encountered Errors or Warnings ---------------------
        @jit('object_(object_)')
-----^
Error 262:5: Expected 1 arguments type(s), got 0
--------------------------------------------------------------------------------
NumbaError: 262:5: Expected 1 arguments type(s), got 0
"""
@autojit
def wrong_restype():  # declares a non-void return type but never returns a value
    @jit('object_()')
    def inner():
        pass
    return inner
__doc__ += """
>>> try_(wrong_restype)
--------------------- Numba Encountered Errors or Warnings ---------------------
    @jit('object_()')
----^
Error 281:4: Function with non-void return does not return a value
--------------------------------------------------------------------------------
NumbaError: 281:4: Function with non-void return does not return a value
"""
#
### Test signatures like @double(object_)
#
@autojit
def signature_dec():  # bare signature objects can be used directly as decorators
    @object_()
    def inner():
        return "hello"
    return inner
__doc__ += """
>>> signature_dec()()
'hello'
"""
@autojit
def wrong_signature2(s):  # same arity mismatch as wrong_signature, bare-signature form
    @object_(object_)
    def inner():
        return s.upper()
    return inner
__doc__ += """
>>> try_(wrong_signature2, "foo")
--------------------- Numba Encountered Errors or Warnings ---------------------
        @object_(object_)
-----^
Error 313:5: Expected 1 arguments type(s), got 0
--------------------------------------------------------------------------------
NumbaError: 313:5: Expected 1 arguments type(s), got 0
"""
@autojit
def get_closure(arg):  # builds, calls and returns a jitted closure capturing 'arg'
    @void()
    def closure():
        print(arg)
    closure()
    return closure
@autojit
def test_call_closure():
    closure = get_closure(10.0)
    closure() # TODO: This still goes through the object layer, amend
__doc__ += """
>>> test_call_closure()
10.0
10.0
"""
@autojit
def test_call_closure_from_closure():  # a closure captured and invoked by another closure
    closure = get_closure(10.0)
    @void()
    def inner():
        closure()
    return inner
__doc__ += """
>>> test_call_closure_from_closure()()
10.0
10.0
"""
@autojit
def test_closure_loop():
    """
    >>> test_closure_loop()
    0 3
    1 3
    2 3
    <BLANKLINE>
    0 3
    1 3
    2 3
    """
    cellvar = 3
    @jit(void())
    def inner():
        for i in range(cellvar):
            print(str(i) + ' ' + str(cellvar))
        print('')
        for i in range(cellvar):
            for j in range(cellvar):
                if i == j:
                    print(str(i) + ' ' + str(cellvar))
    inner()
@autojit(locals=dict(var=int_), warn=False)
def test_closure_outer_locals():
    """
    >>> test_closure_outer_locals()
    """
    var = 10  # the inner 'var' is a distinct local, not this cell
    @jit(void())
    def inner():
        var = "hello"
    inner()
#__doc__ = rewrite_doc(__doc__)
def try_(func, *args):
    """Call func(*args) and doctest-print any NumbaError it raises.

    Under Python 3 the class name is prefixed with its module
    ('numba.error.') to match how doctest renders the exception line.
    """
    try:
        func(*args)
    except NumbaError as e:
        prefix = 'numba.error.' if numba.PY3 else ''
        print("%s%s: %s" % (prefix, type(e).__name__, e))
if __name__ == '__main__':  # run every doctest accumulated in __doc__
    # Commented-out invocations below are kept for debugging single cases.
#    closure1 = closure_arg(1)
#    print closure1.__name__
#    closure1(10)
#    test_call_closure()
#    closure4()
#    signature_dec()()
#    test_closure_outer_locals()
#    test_closure_loop()
#    test_closure_outer_locals()
#    test_call_closure_from_closure()()
#    wrong_restype()
    import numba
    numba.testing.testmod()
| bsd-2-clause |
swenson/sagewiki | unidecode/unidecode/x030.py | 248 | 4037 | data = (
# ASCII transliteration table for Unicode code points U+3000..U+30FF.
# Entry at index N is the replacement for code point U+30NN:
#   0x00-0x3f: CJK symbols and punctuation (ideographic space, brackets,
#              iteration marks, Suzhou numerals, ...)
#   0x41-0x94: hiragana syllables, romanized
#   0xa1-0xfa: katakana syllables, romanized
# '[?]' marks code points with no transliteration; '' maps to nothing.
' ', # 0x00
', ', # 0x01
'. ', # 0x02
'"', # 0x03
'[JIS]', # 0x04
'"', # 0x05
'/', # 0x06
'0', # 0x07
'<', # 0x08
'> ', # 0x09
'<<', # 0x0a
'>> ', # 0x0b
'[', # 0x0c
'] ', # 0x0d
'{', # 0x0e
'} ', # 0x0f
'[(', # 0x10
')] ', # 0x11
'@', # 0x12
'X ', # 0x13
'[', # 0x14
'] ', # 0x15
'[[', # 0x16
']] ', # 0x17
'((', # 0x18
')) ', # 0x19
'[[', # 0x1a
']] ', # 0x1b
'~ ', # 0x1c
'``', # 0x1d
'\'\'', # 0x1e
',,', # 0x1f
'@', # 0x20
'1', # 0x21
'2', # 0x22
'3', # 0x23
'4', # 0x24
'5', # 0x25
'6', # 0x26
'7', # 0x27
'8', # 0x28
'9', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'~', # 0x30
'+', # 0x31
'+', # 0x32
'+', # 0x33
'+', # 0x34
'', # 0x35
'@', # 0x36
' // ', # 0x37
'+10+', # 0x38
'+20+', # 0x39
'+30+', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'', # 0x3e
'', # 0x3f
'[?]', # 0x40
'a', # 0x41
'a', # 0x42
'i', # 0x43
'i', # 0x44
'u', # 0x45
'u', # 0x46
'e', # 0x47
'e', # 0x48
'o', # 0x49
'o', # 0x4a
'ka', # 0x4b
'ga', # 0x4c
'ki', # 0x4d
'gi', # 0x4e
'ku', # 0x4f
'gu', # 0x50
'ke', # 0x51
'ge', # 0x52
'ko', # 0x53
'go', # 0x54
'sa', # 0x55
'za', # 0x56
'shi', # 0x57
'zi', # 0x58
'su', # 0x59
'zu', # 0x5a
'se', # 0x5b
'ze', # 0x5c
'so', # 0x5d
'zo', # 0x5e
'ta', # 0x5f
'da', # 0x60
'chi', # 0x61
'di', # 0x62
'tsu', # 0x63
'tsu', # 0x64
'du', # 0x65
'te', # 0x66
'de', # 0x67
'to', # 0x68
'do', # 0x69
'na', # 0x6a
'ni', # 0x6b
'nu', # 0x6c
'ne', # 0x6d
'no', # 0x6e
'ha', # 0x6f
'ba', # 0x70
'pa', # 0x71
'hi', # 0x72
'bi', # 0x73
'pi', # 0x74
'hu', # 0x75
'bu', # 0x76
'pu', # 0x77
'he', # 0x78
'be', # 0x79
'pe', # 0x7a
'ho', # 0x7b
'bo', # 0x7c
'po', # 0x7d
'ma', # 0x7e
'mi', # 0x7f
'mu', # 0x80
'me', # 0x81
'mo', # 0x82
'ya', # 0x83
'ya', # 0x84
'yu', # 0x85
'yu', # 0x86
'yo', # 0x87
'yo', # 0x88
'ra', # 0x89
'ri', # 0x8a
'ru', # 0x8b
're', # 0x8c
'ro', # 0x8d
'wa', # 0x8e
'wa', # 0x8f
'wi', # 0x90
'we', # 0x91
'wo', # 0x92
'n', # 0x93
'vu', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'"', # 0x9d
'"', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'a', # 0xa1
'a', # 0xa2
'i', # 0xa3
'i', # 0xa4
'u', # 0xa5
'u', # 0xa6
'e', # 0xa7
'e', # 0xa8
'o', # 0xa9
'o', # 0xaa
'ka', # 0xab
'ga', # 0xac
'ki', # 0xad
'gi', # 0xae
'ku', # 0xaf
'gu', # 0xb0
'ke', # 0xb1
'ge', # 0xb2
'ko', # 0xb3
'go', # 0xb4
'sa', # 0xb5
'za', # 0xb6
'shi', # 0xb7
'zi', # 0xb8
'su', # 0xb9
'zu', # 0xba
'se', # 0xbb
'ze', # 0xbc
'so', # 0xbd
'zo', # 0xbe
'ta', # 0xbf
'da', # 0xc0
'chi', # 0xc1
'di', # 0xc2
'tsu', # 0xc3
'tsu', # 0xc4
'du', # 0xc5
'te', # 0xc6
'de', # 0xc7
'to', # 0xc8
'do', # 0xc9
'na', # 0xca
'ni', # 0xcb
'nu', # 0xcc
'ne', # 0xcd
'no', # 0xce
'ha', # 0xcf
'ba', # 0xd0
'pa', # 0xd1
'hi', # 0xd2
'bi', # 0xd3
'pi', # 0xd4
'hu', # 0xd5
'bu', # 0xd6
'pu', # 0xd7
'he', # 0xd8
'be', # 0xd9
'pe', # 0xda
'ho', # 0xdb
'bo', # 0xdc
'po', # 0xdd
'ma', # 0xde
'mi', # 0xdf
'mu', # 0xe0
'me', # 0xe1
'mo', # 0xe2
'ya', # 0xe3
'ya', # 0xe4
'yu', # 0xe5
'yu', # 0xe6
'yo', # 0xe7
'yo', # 0xe8
'ra', # 0xe9
'ri', # 0xea
'ru', # 0xeb
're', # 0xec
'ro', # 0xed
'wa', # 0xee
'wa', # 0xef
'wi', # 0xf0
'we', # 0xf1
'wo', # 0xf2
'n', # 0xf3
'vu', # 0xf4
'ka', # 0xf5
'ke', # 0xf6
'va', # 0xf7
'vi', # 0xf8
've', # 0xf9
'vo', # 0xfa
'', # 0xfb
'', # 0xfc
'"', # 0xfd
'"', # 0xfe
)
| gpl-2.0 |
dsajkl/reqiop | common/test/acceptance/fixtures/course.py | 18 | 18224 | """
Fixture to create a course and course components (XBlocks).
"""
import mimetypes
import json
import re
import datetime
import requests
from textwrap import dedent
from collections import namedtuple
from path import path
from lazy import lazy
from opaque_keys.edx.keys import CourseKey
from . import STUDIO_BASE_URL
class StudioApiLoginError(Exception):
    """Raised when logging in to the Studio API fails."""
class StudioApiFixture(object):
    """
    Base class for fixtures that use the Studio restful API.
    """
    def __init__(self):
        # Info about the auto-auth user used to create the course.
        # Populated by `session` below from the auto_auth response.
        self.user = {}

    @lazy
    def session(self):
        """
        Log in as a staff user, then return a `requests` `session` object for the logged in user.
        Raises a `StudioApiLoginError` if the login fails.
        """
        # `lazy` memoizes the property, so the login happens once per fixture.
        # Use auto-auth to retrieve the session for a logged in user
        session = requests.Session()
        response = session.get(STUDIO_BASE_URL + "/auto_auth?staff=true")

        # Return the session from the request
        if response.ok:
            # auto_auth returns information about the newly created user;
            # capture this so it can be used by the testcases.
            # NOTE(review): '\(' and '\)' are unrecognized escapes that Python
            # currently passes through literally; a raw string would be safer.
            user_pattern = re.compile('Logged in user {0} \({1}\) with password {2} and user_id {3}'.format(
                '(?P<username>\S+)', '(?P<email>[^\)]+)', '(?P<password>\S+)', '(?P<user_id>\d+)'))
            user_matches = re.match(user_pattern, response.text)
            if user_matches:
                self.user = user_matches.groupdict()

            return session

        else:
            msg = "Could not log in to use Studio restful API. Status code: {0}".format(response.status_code)
            raise StudioApiLoginError(msg)

    @lazy
    def session_cookies(self):
        """
        Log in as a staff user, then return the cookies for the session (as a dict)
        Raises a `StudioApiLoginError` if the login fails.
        """
        return {key: val for key, val in self.session.cookies.items()}

    @lazy
    def headers(self):
        """
        Default HTTP headers dict.
        """
        # CSRF token comes from the logged-in session's cookies.
        return {
            'Content-type': 'application/json',
            'Accept': 'application/json',
            'X-CSRFToken': self.session_cookies.get('csrftoken', '')
        }
class XBlockFixtureDesc(object):
    """
    Description of an XBlock, used to configure a course fixture.
    """

    def __init__(self, category, display_name, data=None, metadata=None,
                 grader_type=None, publish='make_public'):
        """
        Capture the XBlock properties the fixture will send to Studio.
        The arguments mirror the Studio REST API fields of the same names.
        """
        self.category = category
        self.display_name = display_name
        self.data = data
        self.metadata = metadata
        self.grader_type = grader_type
        self.publish = publish
        # Child descriptors queued via add_children(); locator is filled in
        # once the XBlock has actually been created.
        self.children = []
        self.locator = None

    def add_children(self, *args):
        """
        Queue child `XBlockFixtureDescriptor` objects under this XBlock.
        Returns this instance to allow chaining.
        """
        for child in args:
            self.children.append(child)
        return self

    def serialize(self):
        """
        Return the JSON payload (a string) suitable for POSTing this XBlock
        to /xblock.  XBlocks are always set to public visibility.
        """
        payload = {
            'display_name': self.display_name,
            'data': self.data,
            'metadata': self.metadata,
            'graderType': self.grader_type,
            'publish': self.publish
        }
        return json.dumps(payload)

    def __str__(self):
        """
        Human-readable dump of every field; useful for error messages.
        """
        template = dedent("""
            <XBlockFixtureDescriptor:
                category={0},
                data={1},
                metadata={2},
                grader_type={3},
                publish={4},
                children={5},
                locator={6},
            >
        """).strip()
        return template.format(
            self.category, self.data, self.metadata,
            self.grader_type, self.publish, self.children, self.locator)
# Description of a course update to add to the course info page.
# `date` is a str (e.g. "January 29, 2014") and `content` is the
# update's body text (e.g. "Test course").
CourseUpdateDesc = namedtuple("CourseUpdateDesc", ['date', 'content'])
class CourseFixtureError(Exception):
    """Raised when installing a course fixture fails."""
class CourseFixture(StudioApiFixture):
"""
Fixture for ensuring that a course exists.
WARNING: This fixture is NOT idempotent. To avoid conflicts
between tests, you should use unique course identifiers for each fixture.
"""
def __init__(self, org, number, run, display_name, start_date=None, end_date=None):
    """
    Configure the course fixture to create a course with `org`, `number`,
    `run`, and `display_name` (all unicode).

    `start_date` and `end_date` are datetime objects for the course start
    and end.  These have the same meaning as in the Studio restful API
    /course end-point.
    """
    self._course_dict = {
        'org': org,
        'number': number,
        'run': run,
        'display_name': display_name
    }

    # Default the start date to the distant past so students can always
    # enroll during tests; leave the end date to Studio's default unless
    # the caller supplied one.
    if start_date is None:
        start_date = datetime.datetime(1970, 1, 1)
    self._course_details = {'start_date': start_date.isoformat()}
    if end_date is not None:
        self._course_details['end_date'] = end_date.isoformat()

    # Content queued by add_*() calls, installed by install().
    self._updates = []
    self._handouts = []
    self.children = []
    self._assets = []
    self._advanced_settings = {}
    self._course_key = None
def __str__(self):
    """
    String representation of the course fixture, useful for debugging.
    """
    template = "<CourseFixture: org='{org}', number='{number}', run='{run}'>"
    return template.format(**self._course_dict)
def add_children(self, *args):
    """
    Queue child XBlocks (each an `XBlockFixtureDescriptor`) for creation
    when the fixture is installed.  Returns this fixture to allow chaining.
    """
    for child in args:
        self.children.append(child)
    return self
def add_update(self, update):
    """
    Queue a `CourseUpdateDesc` to be installed on the course info page.
    """
    self._updates.append(update)
def add_handout(self, asset_name):
    """
    Link the handout named `asset_name` on the course info page.

    This only records the link; it does not upload the static asset itself.
    """
    self._handouts.append(asset_name)
def add_asset(self, asset_name):
    """
    Add the asset to the list of assets to be uploaded when the install method is called.
    """
    # NOTE(review): extend() means callers must pass a *list* of names despite
    # the singular parameter name; a bare string would be split into
    # characters.  Confirm against call sites before changing to append().
    self._assets.extend(asset_name)
def add_advanced_settings(self, settings):
    """
    Merge `settings` into the advanced settings applied to the course when
    the install method is called.
    """
    self._advanced_settings.update(settings)
def install(self):
    """
    Create the course and XBlocks within the course.

    This is NOT an idempotent method; if the course already exists, this will
    raise a `CourseFixtureError`.  You should use unique course identifiers
    to avoid conflicts between tests.
    """
    # The course must exist before updates/handouts/settings can be attached;
    # XBlock children are created last, inside the new course.
    self._create_course()
    self._install_course_updates()
    self._install_course_handouts()
    self._configure_course()
    self._upload_assets()
    self._add_advanced_settings()
    self._create_xblock_children(self._course_location, self.children)
    # Returned for call chaining.
    return self
@property
def _course_location(self):
    """
    Return the locator string for the course.
    """
    course_key = CourseKey.from_string(self._course_key)
    # Keys flagged 'deprecated' identify the course block by its run;
    # newer keys use the fixed block id 'course'.
    if getattr(course_key, 'deprecated', False):
        block_id = self._course_dict['run']
    else:
        block_id = 'course'
    return unicode(course_key.make_usage_key('course', block_id))
@property
def _assets_url(self):
    """
    URL path of the Studio assets endpoint for this course.
    """
    return "/assets/{}/".format(self._course_key)
@property
def _handouts_loc(self):
    """
    Return the locator string for the course handouts
    """
    # Handouts live in a 'course_info' block with the fixed id 'handouts'.
    course_key = CourseKey.from_string(self._course_key)
    return unicode(course_key.make_usage_key('course_info', 'handouts'))  # Python 2 `unicode`
def _create_course(self):
    """
    Create the course described in the fixture.

    On success, stores the new course's key in `self._course_key`.

    Raises:
        CourseFixtureError: if the response is not JSON, carries an
            'ErrMsg' (e.g. the course identifier already exists), or has
            a non-2xx status.
    """
    # If the course already exists, this will respond
    # with a 200 and an error message, which we ignore.
    response = self.session.post(
        STUDIO_BASE_URL + '/course/',
        data=self._encode_post_dict(self._course_dict),
        headers=self.headers
    )
    try:
        err = response.json().get('ErrMsg')
    except ValueError:
        raise CourseFixtureError(
            "Could not parse response from course request as JSON: '{0}'".format(
                response.content))
    # This will occur if the course identifier is not unique
    if err is not None:
        raise CourseFixtureError("Could not create course {0}. Error message: '{1}'".format(self, err))
    if response.ok:
        # NOTE(review): response.json() is parsed a second time here.
        self._course_key = response.json()['course_key']
    else:
        raise CourseFixtureError(
            "Could not create course {0}. Status was {1}".format(
                self._course_dict, response.status_code))
def _configure_course(self):
    """
    Configure course settings (e.g. start and end date).

    Reads the current course details from Studio, overlays the overrides
    collected in `self._course_details`, and POSTs the merged settings
    back.

    Raises:
        CourseFixtureError: if either request fails or the details
            response is not valid JSON.
    """
    url = STUDIO_BASE_URL + '/settings/details/' + self._course_key
    # First, get the current values
    response = self.session.get(url, headers=self.headers)
    if not response.ok:
        raise CourseFixtureError(
            "Could not retrieve course details. Status was {0}".format(
                response.status_code))
    try:
        details = response.json()
    except ValueError:
        # Bug fix: the original formatted `details` here, but `details`
        # is unbound when response.json() raises, so this path crashed
        # with NameError instead of reporting the decode failure.
        # Report the raw response body instead.
        raise CourseFixtureError(
            "Could not decode course details as JSON: '{0}'".format(
                response.content)
        )
    # Update the old details with our overrides
    details.update(self._course_details)
    # POST the updated details to Studio
    response = self.session.post(
        url, data=self._encode_post_dict(details),
        headers=self.headers,
    )
    if not response.ok:
        raise CourseFixtureError(
            "Could not update course details to '{0}' with {1}: Status was {2}.".format(
                self._course_details, url, response.status_code))
def _install_course_handouts(self):
    """
    Add handouts to the course info page.

    Builds an HTML ordered list linking every handout registered via
    `add_handout` and POSTs it to the course's handouts XBlock.

    Raises:
        CourseFixtureError: if the request fails.
    """
    url = STUDIO_BASE_URL + '/xblock/' + self._handouts_loc
    # Construct HTML with each of the handout links
    handouts_li = [
        '<li><a href="/static/{handout}">Example Handout</a></li>'.format(handout=handout)
        for handout in self._handouts
    ]
    handouts_html = '<ol class="treeview-handoutsnav">{}</ol>'.format("".join(handouts_li))
    # Update the course's handouts HTML
    payload = json.dumps({
        'children': None,
        'data': handouts_html,
        'id': self._handouts_loc,
        'metadata': dict()
    })
    response = self.session.post(url, data=payload, headers=self.headers)
    if not response.ok:
        raise CourseFixtureError(
            "Could not update course handouts with {0}. Status was {1}".format(url, response.status_code))
def _install_course_updates(self):
    """
    Add updates to the course, if any are configured.

    Each update queued by `add_update` is POSTed individually as a
    (date, content) pair.

    Raises:
        CourseFixtureError: if any request fails.
    """
    url = STUDIO_BASE_URL + '/course_info_update/' + self._course_key + '/'
    for update in self._updates:
        # Add the update to the course
        date, content = update
        payload = json.dumps({'date': date, 'content': content})
        response = self.session.post(url, headers=self.headers, data=payload)
        if not response.ok:
            raise CourseFixtureError(
                "Could not add update to course: {0} with {1}. Status was {2}".format(
                    update, url, response.status_code))
def _upload_assets(self):
    """
    Upload each asset queued by `add_asset` to the course's Studio
    assets endpoint.

    :raise CourseFixtureError: if any upload request fails.
    """
    url = STUDIO_BASE_URL + self._assets_url
    test_dir = path(__file__).abspath().dirname().dirname().dirname()
    for asset_name in self._assets:
        asset_file_path = test_dir + '/data/uploads/' + asset_name
        # Bug fix: the original opened the file without ever closing it,
        # leaking one handle per asset.  Open in binary mode (assets may
        # be images) inside a `with` block so the handle is closed even
        # if the POST raises.
        with open(asset_file_path, 'rb') as asset_file:
            files = {'file': (asset_name, asset_file, mimetypes.guess_type(asset_file_path)[0])}
            headers = {
                'Accept': 'application/json',
                'X-CSRFToken': self.session_cookies.get('csrftoken', '')
            }
            upload_response = self.session.post(url, files=files, headers=headers)
        if not upload_response.ok:
            raise CourseFixtureError('Could not upload {asset_name} with {url}. Status code: {code}'.format(
                asset_name=asset_name, url=url, code=upload_response.status_code))
def _add_advanced_settings(self):
    """
    Add advanced settings.

    POSTs the settings collected by `add_advanced_settings` to Studio's
    advanced-settings endpoint for this course.

    Raises:
        CourseFixtureError: if the request fails.
    """
    url = STUDIO_BASE_URL + "/settings/advanced/" + self._course_key
    # POST advanced settings to Studio
    response = self.session.post(
        url, data=self._encode_post_dict(self._advanced_settings),
        headers=self.headers,
    )
    if not response.ok:
        raise CourseFixtureError(
            "Could not update advanced details to '{0}' with {1}: Status was {2}.".format(
                self._advanced_settings, url, response.status_code))
def _create_xblock_children(self, parent_loc, xblock_descriptions):
    """
    Recursively create XBlock children.

    `parent_loc` is the locator of the parent block; `xblock_descriptions`
    is an iterable of descriptors, each of which may have `children` of
    its own.
    """
    for desc in xblock_descriptions:
        loc = self.create_xblock(parent_loc, desc)
        self._create_xblock_children(loc, desc.children)
    # Publish the parent once its whole subtree has been created.
    self._publish_xblock(parent_loc)
def get_nested_xblocks(self, category=None):
    """
    Return a list of nested XBlocks for the course that can be filtered by
    category.

    Always returns a list: the original used `filter`, which returns a
    lazy iterator on Python 3; a list comprehension behaves identically
    on both Python 2 and 3.
    """
    xblocks = self._get_nested_xblocks(self)
    if category:
        xblocks = [block for block in xblocks if block.category == category]
    return xblocks
def _get_nested_xblocks(self, xblock_descriptor):
    """
    Collect every descendant of `xblock_descriptor`: its direct children
    first, followed by each child's own descendants in order.
    """
    direct = list(xblock_descriptor.children)
    nested = []
    for child in direct:
        nested += self._get_nested_xblocks(child)
    return direct + nested
def create_xblock(self, parent_loc, xblock_desc):
    """
    Create an XBlock with `parent_loc` (the location of the parent block)
    and `xblock_desc` (an `XBlockFixtureDesc` instance).

    Returns the locator of the new block; as a side effect it is also
    stored on `xblock_desc.locator`.

    Raises:
        CourseFixtureError: if either Studio request fails or the
            creation response is not valid JSON.
    """
    create_payload = {
        'category': xblock_desc.category,
        'display_name': xblock_desc.display_name,
    }
    if parent_loc is not None:
        create_payload['parent_locator'] = parent_loc
    # Create the new XBlock
    response = self.session.post(
        STUDIO_BASE_URL + '/xblock/',
        data=json.dumps(create_payload),
        headers=self.headers,
    )
    if not response.ok:
        msg = "Could not create {0}. Status was {1}".format(xblock_desc, response.status_code)
        raise CourseFixtureError(msg)
    try:
        loc = response.json().get('locator')
        xblock_desc.locator = loc
    except ValueError:
        raise CourseFixtureError("Could not decode JSON from '{0}'".format(response.content))
    # Configure the XBlock
    response = self.session.post(
        STUDIO_BASE_URL + '/xblock/' + loc,
        data=xblock_desc.serialize(),
        headers=self.headers,
    )
    if response.ok:
        return loc
    else:
        raise CourseFixtureError(
            "Could not update {0}. Status code: {1}".format(
                xblock_desc, response.status_code))
def _publish_xblock(self, locator):
    """
    Publish the xblock at `locator`.

    Thin wrapper around `_update_xblock` using Studio's
    'publish': 'make_public' action.
    """
    self._update_xblock(locator, {'publish': 'make_public'})
def _update_xblock(self, locator, data):
    """
    Update the xblock at `locator` by PUTting `data` (a dict) as JSON.

    Raises:
        CourseFixtureError: if the request fails.
    """
    # PUT the update to the xblock handler (the original comment here,
    # "Create the new XBlock", was a copy-paste error).
    response = self.session.put(
        "{}/xblock/{}".format(STUDIO_BASE_URL, locator),
        data=json.dumps(data),
        headers=self.headers,
    )
    if not response.ok:
        msg = "Could not update {} with data {}. Status was {}".format(locator, data, response.status_code)
        raise CourseFixtureError(msg)
def _encode_post_dict(self, post_dict):
    """
    Encode `post_dict` (a dictionary) as UTF-8 encoded JSON.
    """
    # Python 2 only: `basestring` covers both `str` and `unicode`, and
    # `.encode('utf-8')` produces byte strings for json.dumps.
    return json.dumps({
        k: v.encode('utf-8') if isinstance(v, basestring) else v
        for k, v in post_dict.items()
    })
| agpl-3.0 |
HenriWahl/Nagstamon | Nagstamon/thirdparty/Xlib/keysymdef/korean.py | 14 | 2860 | XK_Hangul = 0xff31
# X11 keysym codes for Korean (Hangul) keyboards, mirroring the values in
# <X11/keysymdef.h>.  Values are the raw keysym numbers.

# Input-method control keys (start/end, Hanja conversion, candidate
# selection, and the Hangul/English toggle).
XK_Hangul_Start = 0xff32
XK_Hangul_End = 0xff33
XK_Hangul_Hanja = 0xff34
XK_Hangul_Jamo = 0xff35
XK_Hangul_Romaja = 0xff36
XK_Hangul_Codeinput = 0xff37
XK_Hangul_Jeonja = 0xff38
XK_Hangul_Banja = 0xff39
XK_Hangul_PreHanja = 0xff3a
XK_Hangul_PostHanja = 0xff3b
XK_Hangul_SingleCandidate = 0xff3c
XK_Hangul_MultipleCandidate = 0xff3d
XK_Hangul_PreviousCandidate = 0xff3e
XK_Hangul_Special = 0xff3f
XK_Hangul_switch = 0xFF7E

# Consonant keysyms.
XK_Hangul_Kiyeog = 0xea1
XK_Hangul_SsangKiyeog = 0xea2
XK_Hangul_KiyeogSios = 0xea3
XK_Hangul_Nieun = 0xea4
XK_Hangul_NieunJieuj = 0xea5
XK_Hangul_NieunHieuh = 0xea6
XK_Hangul_Dikeud = 0xea7
XK_Hangul_SsangDikeud = 0xea8
XK_Hangul_Rieul = 0xea9
XK_Hangul_RieulKiyeog = 0xeaa
XK_Hangul_RieulMieum = 0xeab
XK_Hangul_RieulPieub = 0xeac
XK_Hangul_RieulSios = 0xead
XK_Hangul_RieulTieut = 0xeae
XK_Hangul_RieulPhieuf = 0xeaf
XK_Hangul_RieulHieuh = 0xeb0
XK_Hangul_Mieum = 0xeb1
XK_Hangul_Pieub = 0xeb2
XK_Hangul_SsangPieub = 0xeb3
XK_Hangul_PieubSios = 0xeb4
XK_Hangul_Sios = 0xeb5
XK_Hangul_SsangSios = 0xeb6
XK_Hangul_Ieung = 0xeb7
XK_Hangul_Jieuj = 0xeb8
XK_Hangul_SsangJieuj = 0xeb9
XK_Hangul_Cieuc = 0xeba
XK_Hangul_Khieuq = 0xebb
XK_Hangul_Tieut = 0xebc
XK_Hangul_Phieuf = 0xebd
XK_Hangul_Hieuh = 0xebe

# Vowel keysyms.
XK_Hangul_A = 0xebf
XK_Hangul_AE = 0xec0
XK_Hangul_YA = 0xec1
XK_Hangul_YAE = 0xec2
XK_Hangul_EO = 0xec3
XK_Hangul_E = 0xec4
XK_Hangul_YEO = 0xec5
XK_Hangul_YE = 0xec6
XK_Hangul_O = 0xec7
XK_Hangul_WA = 0xec8
XK_Hangul_WAE = 0xec9
XK_Hangul_OE = 0xeca
XK_Hangul_YO = 0xecb
XK_Hangul_U = 0xecc
XK_Hangul_WEO = 0xecd
XK_Hangul_WE = 0xece
XK_Hangul_WI = 0xecf
XK_Hangul_YU = 0xed0
XK_Hangul_EU = 0xed1
XK_Hangul_YI = 0xed2
XK_Hangul_I = 0xed3

# Final (trailing) consonant keysyms ("J_" prefix).
XK_Hangul_J_Kiyeog = 0xed4
XK_Hangul_J_SsangKiyeog = 0xed5
XK_Hangul_J_KiyeogSios = 0xed6
XK_Hangul_J_Nieun = 0xed7
XK_Hangul_J_NieunJieuj = 0xed8
XK_Hangul_J_NieunHieuh = 0xed9
XK_Hangul_J_Dikeud = 0xeda
XK_Hangul_J_Rieul = 0xedb
XK_Hangul_J_RieulKiyeog = 0xedc
XK_Hangul_J_RieulMieum = 0xedd
XK_Hangul_J_RieulPieub = 0xede
XK_Hangul_J_RieulSios = 0xedf
XK_Hangul_J_RieulTieut = 0xee0
XK_Hangul_J_RieulPhieuf = 0xee1
XK_Hangul_J_RieulHieuh = 0xee2
XK_Hangul_J_Mieum = 0xee3
XK_Hangul_J_Pieub = 0xee4
XK_Hangul_J_PieubSios = 0xee5
XK_Hangul_J_Sios = 0xee6
XK_Hangul_J_SsangSios = 0xee7
XK_Hangul_J_Ieung = 0xee8
XK_Hangul_J_Jieuj = 0xee9
XK_Hangul_J_Cieuc = 0xeea
XK_Hangul_J_Khieuq = 0xeeb
XK_Hangul_J_Tieut = 0xeec
XK_Hangul_J_Phieuf = 0xeed
XK_Hangul_J_Hieuh = 0xeee

# Historic/archaic jamo (per the keysym names) and the Won currency sign.
XK_Hangul_RieulYeorinHieuh = 0xeef
XK_Hangul_SunkyeongeumMieum = 0xef0
XK_Hangul_SunkyeongeumPieub = 0xef1
XK_Hangul_PanSios = 0xef2
XK_Hangul_KkogjiDalrinIeung = 0xef3
XK_Hangul_SunkyeongeumPhieuf = 0xef4
XK_Hangul_YeorinHieuh = 0xef5
XK_Hangul_AraeA = 0xef6
XK_Hangul_AraeAE = 0xef7
XK_Hangul_J_PanSios = 0xef8
XK_Hangul_J_KkogjiDalrinIeung = 0xef9
XK_Hangul_J_YeorinHieuh = 0xefa
XK_Korean_Won = 0xeff
| gpl-2.0 |
bzhou26/NRA-Crawler | selenium/webdriver/support/color.py | 71 | 11399 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# CSS colour syntax patterns consumed by Color.from_string.  The functional
# notations (rgb/rgba/hsl/hsla) are anchored at both ends and capture each
# channel; the hex patterns are unanchored and rely on re.match anchoring
# at the start of the string only.
RGB_PATTERN = r"^\s*rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*\)\s*$"
RGB_PCT_PATTERN = r"^\s*rgb\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*\)\s*$"
RGBA_PATTERN = r"^\s*rgba\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
RGBA_PCT_PATTERN = r"^\s*rgba\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
HEX_PATTERN = r"#([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})"
HEX3_PATTERN = r"#([A-Fa-f0-9])([A-Fa-f0-9])([A-Fa-f0-9])"
HSL_PATTERN = r"^\s*hsl\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*\)\s*$"
HSLA_PATTERN = r"^\s*hsla\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
class Color(object):
    """
    Color conversion support class
    Example:
    .. code-block:: python
        from selenium.webdriver.support.color import Color
        print(Color.from_string('#00ff33').rgba)
        print(Color.from_string('rgb(1, 255, 3)').hex)
        print(Color.from_string('blue').rgba)
    """
    @staticmethod
    def from_string(str_):
        # Parse a CSS colour string (rgb/rgba, percentage variants, 6- or
        # 3-digit hex, hsl/hsla, or a named colour) into a Color.
        # Raises ValueError for anything unrecognised.
        import re

        # Tiny helper so each regex attempt can be written as a single
        # `if m.match(...)` expression while remembering the groups.
        class Matcher(object):
            def __init__(self):
                self.match_obj = None
            def match(self, pattern, str_):
                self.match_obj = re.match(pattern, str_)
                return self.match_obj
            @property
            def groups(self):
                return () if self.match_obj is None else self.match_obj.groups()
        m = Matcher()
        # The patterns are tried in a fixed order; rgb before rgba, hex6
        # before hex3, then hsl/hsla, then the named-colour table.
        if m.match(RGB_PATTERN, str_):
            return Color(*m.groups)
        elif m.match(RGB_PCT_PATTERN, str_):
            # Percentages are scaled onto 0-255 (int() in __init__ truncates).
            rgb = tuple([float(each) / 100 * 255 for each in m.groups])
            return Color(*rgb)
        elif m.match(RGBA_PATTERN, str_):
            return Color(*m.groups)
        elif m.match(RGBA_PCT_PATTERN, str_):
            rgba = tuple([float(each) / 100 * 255 for each in m.groups[:3]] + [m.groups[3]])
            return Color(*rgba)
        elif m.match(HEX_PATTERN, str_):
            rgb = tuple([int(each, 16) for each in m.groups])
            return Color(*rgb)
        elif m.match(HEX3_PATTERN, str_):
            # 3-digit hex: each nibble is doubled ("a" -> "aa").
            rgb = tuple([int(each * 2, 16) for each in m.groups])
            return Color(*rgb)
        elif m.match(HSL_PATTERN, str_) or m.match(HSLA_PATTERN, str_):
            return Color._from_hsl(*m.groups)
        elif str_.upper() in Colors.keys():
            # Named colours come from the module-level `Colors` table
            # defined after this class (resolved at call time).
            return Colors[str_.upper()]
        else:
            raise ValueError("Could not convert %s into color" % str_)
    @staticmethod
    def _from_hsl(h, s, l, a=1):
        # Standard HSL -> RGB conversion; h in degrees, s/l as percentages.
        h = float(h) / 360
        s = float(s) / 100
        l = float(l) / 100
        if s == 0:
            # Zero saturation: achromatic, all channels equal lightness.
            r = l
            g = r
            b = r
        else:
            luminocity2 = l * (1 + s) if l < 0.5 else l + s - l * s
            luminocity1 = 2 * l - luminocity2
            def hue_to_rgb(lum1, lum2, hue):
                # Wrap hue into [0, 1) and pick the piecewise-linear segment.
                if hue < 0.0:
                    hue += 1
                if hue > 1.0:
                    hue -= 1
                if hue < 1.0 / 6.0:
                    return (lum1 + (lum2 - lum1) * 6.0 * hue)
                elif hue < 1.0 / 2.0:
                    return lum2
                elif hue < 2.0 / 3.0:
                    return lum1 + (lum2 - lum1) * ((2.0 / 3.0) - hue) * 6.0
                else:
                    return lum1
            r = hue_to_rgb(luminocity1, luminocity2, h + 1.0 / 3.0)
            g = hue_to_rgb(luminocity1, luminocity2, h)
            b = hue_to_rgb(luminocity1, luminocity2, h - 1.0 / 3.0)
        # round() may return a float (Python 2); __init__ int()s each channel.
        return Color(round(r * 255), round(g * 255), round(b * 255), a)
    def __init__(self, red, green, blue, alpha=1):
        # Channels are truncated to int; alpha is normalised to a *string*:
        # "1" when fully opaque, otherwise str() of the float (0 -> "0").
        self.red = int(red)
        self.green = int(green)
        self.blue = int(blue)
        self.alpha = "1" if float(alpha) == 1 else str(float(alpha) or 0)
    @property
    def rgb(self):
        # CSS rgb() serialisation (alpha is dropped).
        return "rgb(%d, %d, %d)" % (self.red, self.green, self.blue)
    @property
    def rgba(self):
        # CSS rgba() serialisation, including the string-typed alpha.
        return "rgba(%d, %d, %d, %s)" % (self.red, self.green, self.blue, self.alpha)
    @property
    def hex(self):
        # Lowercase 6-digit hex serialisation (alpha is dropped).
        return "#%02x%02x%02x" % (self.red, self.green, self.blue)
    def __eq__(self, other):
        # Equality compares the rgba string, so alpha participates.
        if isinstance(other, Color):
            return self.rgba == other.rgba
        return NotImplemented
    def __ne__(self, other):
        # Python 2 needs an explicit __ne__ mirroring __eq__.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __hash__(self):
        # Consistent with __eq__ (both derive from the same four fields).
        return hash((self.red, self.green, self.blue, self.alpha))
    def __repr__(self):
        return "Color(red=%d, green=%d, blue=%d, alpha=%s)" % (self.red, self.green, self.blue, self.alpha)
    def __str__(self):
        return "Color: %s" % self.rgba
# Basic, extended and transparent colour keywords as defined by the W3C HTML4 spec
# See http://www.w3.org/TR/css3-color/#html4
# Keys are upper-case colour names (from_string upper-cases its lookup);
# values are Color instances, fully opaque except TRANSPARENT.
Colors = {
    "TRANSPARENT": Color(0, 0, 0, 0),
    "ALICEBLUE": Color(240, 248, 255),
    "ANTIQUEWHITE": Color(250, 235, 215),
    "AQUA": Color(0, 255, 255),
    "AQUAMARINE": Color(127, 255, 212),
    "AZURE": Color(240, 255, 255),
    "BEIGE": Color(245, 245, 220),
    "BISQUE": Color(255, 228, 196),
    "BLACK": Color(0, 0, 0),
    "BLANCHEDALMOND": Color(255, 235, 205),
    "BLUE": Color(0, 0, 255),
    "BLUEVIOLET": Color(138, 43, 226),
    "BROWN": Color(165, 42, 42),
    "BURLYWOOD": Color(222, 184, 135),
    "CADETBLUE": Color(95, 158, 160),
    "CHARTREUSE": Color(127, 255, 0),
    "CHOCOLATE": Color(210, 105, 30),
    "CORAL": Color(255, 127, 80),
    "CORNFLOWERBLUE": Color(100, 149, 237),
    "CORNSILK": Color(255, 248, 220),
    "CRIMSON": Color(220, 20, 60),
    "CYAN": Color(0, 255, 255),
    "DARKBLUE": Color(0, 0, 139),
    "DARKCYAN": Color(0, 139, 139),
    "DARKGOLDENROD": Color(184, 134, 11),
    "DARKGRAY": Color(169, 169, 169),
    "DARKGREEN": Color(0, 100, 0),
    "DARKGREY": Color(169, 169, 169),
    "DARKKHAKI": Color(189, 183, 107),
    "DARKMAGENTA": Color(139, 0, 139),
    "DARKOLIVEGREEN": Color(85, 107, 47),
    "DARKORANGE": Color(255, 140, 0),
    "DARKORCHID": Color(153, 50, 204),
    "DARKRED": Color(139, 0, 0),
    "DARKSALMON": Color(233, 150, 122),
    "DARKSEAGREEN": Color(143, 188, 143),
    "DARKSLATEBLUE": Color(72, 61, 139),
    "DARKSLATEGRAY": Color(47, 79, 79),
    "DARKSLATEGREY": Color(47, 79, 79),
    "DARKTURQUOISE": Color(0, 206, 209),
    "DARKVIOLET": Color(148, 0, 211),
    "DEEPPINK": Color(255, 20, 147),
    "DEEPSKYBLUE": Color(0, 191, 255),
    "DIMGRAY": Color(105, 105, 105),
    "DIMGREY": Color(105, 105, 105),
    "DODGERBLUE": Color(30, 144, 255),
    "FIREBRICK": Color(178, 34, 34),
    "FLORALWHITE": Color(255, 250, 240),
    "FORESTGREEN": Color(34, 139, 34),
    "FUCHSIA": Color(255, 0, 255),
    "GAINSBORO": Color(220, 220, 220),
    "GHOSTWHITE": Color(248, 248, 255),
    "GOLD": Color(255, 215, 0),
    "GOLDENROD": Color(218, 165, 32),
    "GRAY": Color(128, 128, 128),
    "GREY": Color(128, 128, 128),
    "GREEN": Color(0, 128, 0),
    "GREENYELLOW": Color(173, 255, 47),
    "HONEYDEW": Color(240, 255, 240),
    "HOTPINK": Color(255, 105, 180),
    "INDIANRED": Color(205, 92, 92),
    "INDIGO": Color(75, 0, 130),
    "IVORY": Color(255, 255, 240),
    "KHAKI": Color(240, 230, 140),
    "LAVENDER": Color(230, 230, 250),
    "LAVENDERBLUSH": Color(255, 240, 245),
    "LAWNGREEN": Color(124, 252, 0),
    "LEMONCHIFFON": Color(255, 250, 205),
    "LIGHTBLUE": Color(173, 216, 230),
    "LIGHTCORAL": Color(240, 128, 128),
    "LIGHTCYAN": Color(224, 255, 255),
    "LIGHTGOLDENRODYELLOW": Color(250, 250, 210),
    "LIGHTGRAY": Color(211, 211, 211),
    "LIGHTGREEN": Color(144, 238, 144),
    "LIGHTGREY": Color(211, 211, 211),
    "LIGHTPINK": Color(255, 182, 193),
    "LIGHTSALMON": Color(255, 160, 122),
    "LIGHTSEAGREEN": Color(32, 178, 170),
    "LIGHTSKYBLUE": Color(135, 206, 250),
    "LIGHTSLATEGRAY": Color(119, 136, 153),
    "LIGHTSLATEGREY": Color(119, 136, 153),
    "LIGHTSTEELBLUE": Color(176, 196, 222),
    "LIGHTYELLOW": Color(255, 255, 224),
    "LIME": Color(0, 255, 0),
    "LIMEGREEN": Color(50, 205, 50),
    "LINEN": Color(250, 240, 230),
    "MAGENTA": Color(255, 0, 255),
    "MAROON": Color(128, 0, 0),
    "MEDIUMAQUAMARINE": Color(102, 205, 170),
    "MEDIUMBLUE": Color(0, 0, 205),
    "MEDIUMORCHID": Color(186, 85, 211),
    "MEDIUMPURPLE": Color(147, 112, 219),
    "MEDIUMSEAGREEN": Color(60, 179, 113),
    "MEDIUMSLATEBLUE": Color(123, 104, 238),
    "MEDIUMSPRINGGREEN": Color(0, 250, 154),
    "MEDIUMTURQUOISE": Color(72, 209, 204),
    "MEDIUMVIOLETRED": Color(199, 21, 133),
    "MIDNIGHTBLUE": Color(25, 25, 112),
    "MINTCREAM": Color(245, 255, 250),
    "MISTYROSE": Color(255, 228, 225),
    "MOCCASIN": Color(255, 228, 181),
    "NAVAJOWHITE": Color(255, 222, 173),
    "NAVY": Color(0, 0, 128),
    "OLDLACE": Color(253, 245, 230),
    "OLIVE": Color(128, 128, 0),
    "OLIVEDRAB": Color(107, 142, 35),
    "ORANGE": Color(255, 165, 0),
    "ORANGERED": Color(255, 69, 0),
    "ORCHID": Color(218, 112, 214),
    "PALEGOLDENROD": Color(238, 232, 170),
    "PALEGREEN": Color(152, 251, 152),
    "PALETURQUOISE": Color(175, 238, 238),
    "PALEVIOLETRED": Color(219, 112, 147),
    "PAPAYAWHIP": Color(255, 239, 213),
    "PEACHPUFF": Color(255, 218, 185),
    "PERU": Color(205, 133, 63),
    "PINK": Color(255, 192, 203),
    "PLUM": Color(221, 160, 221),
    "POWDERBLUE": Color(176, 224, 230),
    "PURPLE": Color(128, 0, 128),
    "REBECCAPURPLE": Color(128, 51, 153),
    "RED": Color(255, 0, 0),
    "ROSYBROWN": Color(188, 143, 143),
    "ROYALBLUE": Color(65, 105, 225),
    "SADDLEBROWN": Color(139, 69, 19),
    "SALMON": Color(250, 128, 114),
    "SANDYBROWN": Color(244, 164, 96),
    "SEAGREEN": Color(46, 139, 87),
    "SEASHELL": Color(255, 245, 238),
    "SIENNA": Color(160, 82, 45),
    "SILVER": Color(192, 192, 192),
    "SKYBLUE": Color(135, 206, 235),
    "SLATEBLUE": Color(106, 90, 205),
    "SLATEGRAY": Color(112, 128, 144),
    "SLATEGREY": Color(112, 128, 144),
    "SNOW": Color(255, 250, 250),
    "SPRINGGREEN": Color(0, 255, 127),
    "STEELBLUE": Color(70, 130, 180),
    "TAN": Color(210, 180, 140),
    "TEAL": Color(0, 128, 128),
    "THISTLE": Color(216, 191, 216),
    "TOMATO": Color(255, 99, 71),
    "TURQUOISE": Color(64, 224, 208),
    "VIOLET": Color(238, 130, 238),
    "WHEAT": Color(245, 222, 179),
    "WHITE": Color(255, 255, 255),
    "WHITESMOKE": Color(245, 245, 245),
    "YELLOW": Color(255, 255, 0),
    "YELLOWGREEN": Color(154, 205, 50)
}
| mit |
Xtrend-Official/Xtrend-E2 | lib/python/Screens/VirtualKeyBoard.py | 5 | 18445 | # -*- coding: UTF-8 -*-
from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_CENTER, RT_VALIGN_CENTER, getPrevAsciiCode
from Screen import Screen
from Components.Language import language
from Components.ActionMap import NumberActionMap
from Components.Sources.StaticText import StaticText
from Components.Input import Input
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from Tools.LoadPixmap import LoadPixmap
from Tools.NumericalTextInput import NumericalTextInput
class VirtualKeyBoardList(MenuList):
    # Listbox that renders the keyboard rows via the multi-content renderer.
    def __init__(self, list, enableWrapAround=False):
        MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
        # Font slot 0 = Regular 28pt; each keyboard row is 45px tall
        # (matching the pixmap/text sizes used in the entry builder).
        self.l.setFont(0, gFont("Regular", 28))
        self.l.setItemHeight(45)
class VirtualKeyBoardEntryComponent:
    # NOTE(review): empty marker class, not referenced elsewhere in this
    # chunk -- presumably kept for skin/API compatibility; confirm before
    # removing.
    pass
class VirtualKeyBoard(Screen):
def __init__(self, session, title="", **kwargs):
    """
    On-screen keyboard dialog.

    `title` is shown as the header; remaining keyword arguments (e.g.
    `text`) are forwarded to the `Input` component that holds the
    edited string.
    """
    Screen.__init__(self, session)
    self.keys_list = []          # current unshifted layout (rows of key labels)
    self.shiftkeys_list = []     # current shifted layout
    self.lang = language.getLanguage()
    self.nextLang = None         # language the yellow button switches to
    self.shiftMode = False
    self.selectedKey = 0         # linear key index; row = index / 12
    self.smsChar = None
    self.sms = NumericalTextInput(self.smsOK)
    # Pixmaps for the key backgrounds and the special keys.
    self.key_bg = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_bg.png"))
    self.key_sel = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_sel.png"))
    self.key_backspace = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_backspace.png"))
    self.key_all = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_all.png"))
    self.key_clr = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_clr.png"))
    self.key_esc = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_esc.png"))
    self.key_ok = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_ok.png"))
    self.key_shift = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_shift.png"))
    self.key_shift_sel = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_shift_sel.png"))
    self.key_space = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_space.png"))
    self.key_left = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_left.png"))
    self.key_right = LoadPixmap(path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_right.png"))
    # Special-key label -> pixmap, for the unshifted layout ("ALL")...
    self.keyImages = {
        "BACKSPACE": self.key_backspace,
        "ALL": self.key_all,
        "EXIT": self.key_esc,
        "OK": self.key_ok,
        "SHIFT": self.key_shift,
        "SPACE": self.key_space,
        "LEFT": self.key_left,
        "RIGHT": self.key_right
    }
    # ...and for the shifted layout ("CLEAR", highlighted SHIFT).
    self.keyImagesShift = {
        "BACKSPACE": self.key_backspace,
        "CLEAR": self.key_clr,
        "EXIT": self.key_esc,
        "OK": self.key_ok,
        "SHIFT": self.key_shift_sel,
        "SPACE": self.key_space,
        "LEFT": self.key_left,
        "RIGHT": self.key_right
    }
    self["country"] = StaticText("")
    self["header"] = Label(title)
    # Cursor starts at the end of any initial text (decoded as UTF-8).
    self["text"] = Input(currPos=len(kwargs.get("text", "").decode("utf-8",'ignore')), allMarked=False, **kwargs)
    self["list"] = VirtualKeyBoardList([])
    self["actions"] = NumberActionMap(["OkCancelActions", "WizardActions", "ColorActions", "KeyboardInputActions", "InputBoxActions", "InputAsciiActions"],
    {
        "gotAsciiCode": self.keyGotAscii,
        "ok": self.okClicked,
        "cancel": self.exit,
        "left": self.left,
        "right": self.right,
        "up": self.up,
        "down": self.down,
        "red": self.exit,
        "green": self.ok,
        "yellow": self.switchLang,
        "blue": self.shiftClicked,
        "deleteBackward": self.backClicked,
        "deleteForward": self.forwardClicked,
        "back": self.exit,
        "pageUp": self.cursorRight,
        "pageDown": self.cursorLeft,
        "1": self.keyNumberGlobal,
        "2": self.keyNumberGlobal,
        "3": self.keyNumberGlobal,
        "4": self.keyNumberGlobal,
        "5": self.keyNumberGlobal,
        "6": self.keyNumberGlobal,
        "7": self.keyNumberGlobal,
        "8": self.keyNumberGlobal,
        "9": self.keyNumberGlobal,
        "0": self.keyNumberGlobal,
    }, -2)
    self.setLang()
    self.onExecBegin.append(self.setKeyboardModeAscii)
    self.onLayoutFinish.append(self.buildVirtualKeyBoard)
    self.onClose.append(self.__onClose)
def __onClose(self):
    # Stop the SMS-style input timer so it cannot fire after the screen
    # is gone.
    self.sms.timer.stop()
def switchLang(self):
    """
    Cycle to the next keyboard language (yellow button) and rebuild the
    key rows; `setLang` decides which language follows the current one.
    """
    self.lang = self.nextLang
    self.setLang()
    self.buildVirtualKeyBoard()
def setLang(self):
if self.lang == 'de_DE':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"ü", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ö", u"ä", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"ALL"],
[u"SHIFT", u"SPACE", u"@", u"ß", u"OK", u"LEFT", u"RIGHT"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"Ü", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"Ö", u"Ä", u"'"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"OK", u"LEFT", u"RIGHT"]]
self.nextLang = 'es_ES'
elif self.lang == 'es_ES':
#still missing keys (u"ùÙ")
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"ú", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ó", u"á", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"ALL"],
[u"SHIFT", u"SPACE", u"@", u"Ł", u"ŕ", u"é", u"č", u"í", u"ě", u"ń", u"ň", u"OK"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"Ú", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"Ó", u"Á", u"'"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"Ŕ", u"É", u"Č", u"Í", u"Ě", u"Ń", u"Ň", u"OK"]]
self.nextLang = 'fi_FI'
elif self.lang == 'fi_FI':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"é", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ö", u"ä", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"ALL"],
[u"SHIFT", u"SPACE", u"@", u"ß", u"ĺ", u"OK", u"LEFT", u"RIGHT"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"É", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"Ö", u"Ä", u"'"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"Ĺ", u"OK", u"LEFT", u"RIGHT"]]
self.nextLang = 'ru_RU'
elif self.lang == 'ru_RU':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"а", u"б", u"в", u"г", u"д", u"е", u"ё", u"ж", u"з", u"и", u"й", u"+"],
[u"к", u"л", u"м", u"н", u"о", u"п", u"р", u"с", u"т", u"у", u"ф", u"#"],
[u"<", u"х", u"ц", u"ч", u"ш", u"щ", u"ъ", u"ы", u",", u".", u"-", u"ALL"],
[u"SHIFT", u"SPACE", u"@", u"ь", u"э", u"ю", u"я", u"OK", u"LEFT", u"RIGHT"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"А", u"Б", u"В", u"Г", u"Д", u"Е", u"Ё", u"Ж", u"З", u"И", u"Й", u"*"],
[u"К", u"Л", u"М", u"Н", u"О", u"П", u"Р", u"С", u"Т", u"У", u"Ф", u"'"],
[u">", u"Х", u"Ц", u"Ч", u"Ш", u"Щ", u"Ъ", u"Ы", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"Ь", u"Э", u"Ю", u"Я", u"OK", u"LEFT", u"RIGHT"]]
self.nextLang = 'sv_SE'
elif self.lang == 'sv_SE':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"é", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ö", u"ä", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"ALL"],
[u"SHIFT", u"SPACE", u"@", u"ß", u"ĺ", u"OK", u"LEFT", u"RIGHT"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"É", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"Ö", u"Ä", u"'"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"Ĺ", u"OK", u"LEFT", u"RIGHT"]]
self.nextLang = 'sk_SK'
elif self.lang =='sk_SK':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"ú", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ľ", u"@", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"ALL"],
[u"SHIFT", u"SPACE", u"š", u"č", u"ž", u"ý", u"á", u"í", u"é", u"OK", u"LEFT", u"RIGHT"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"ť", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"ň", u"ď", u"'"],
[u"Á", u"É", u"Ď", u"Í", u"Ý", u"Ó", u"Ú", u"Ž", u"Š", u"Č", u"Ť", u"Ň"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"ä", u"ö", u"ü", u"ô", u"ŕ", u"ĺ", u"OK"]]
self.nextLang = 'cs_CZ'
elif self.lang == 'cs_CZ':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"ú", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ů", u"@", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"ALL"],
[u"SHIFT", u"SPACE", u"ě", u"š", u"č", u"ř", u"ž", u"ý", u"á", u"í", u"é", u"OK"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"ť", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"ň", u"ď", u"'"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"Č", u"Ř", u"Š", u"Ž", u"Ú", u"Á", u"É", u"OK"]]
self.nextLang = 'el_GR'
elif self.lang == 'el_GR':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"=", u"ς", u"ε", u"ρ", u"τ", u"υ", u"θ", u"ι", u"ο", u"π", u"[", u"]"],
[u"α", u"σ", u"δ", u"φ", u"γ", u"η", u"ξ", u"κ", u"λ", u";", u"'", u"-"],
[u"\\", u"ζ", u"χ", u"ψ", u"ω", u"β", u"ν", u"μ", u",", ".", u"/", u"ALL"],
[u"SHIFT", u"SPACE", u"ά", u"έ", u"ή", u"ί", u"ό", u"ύ", u"ώ", u"ϊ", u"ϋ", u"OK"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u"@", u"#", u"$", u"%", u"^", u"&", u"*", u"(", u")", u"BACKSPACE"],
[u"+", u"€", u"Ε", u"Ρ", u"Τ", u"Υ", u"Θ", u"Ι", u"Ο", u"Π", u"{", u"}"],
[u"Α", u"Σ", u"Δ", u"Φ", u"Γ", u"Η", u"Ξ", u"Κ", u"Λ", u":", u'"', u"_"],
[u"|", u"Ζ", u"Χ", u"Ψ", u"Ω", u"Β", u"Ν", u"Μ", u"<", u">", u"?", u"CLEAR"],
[u"SHIFT", u"SPACE", u"Ά", u"Έ", u"Ή", u"Ί", u"Ό", u"Ύ", u"Ώ", u"Ϊ", u"Ϋ", u"OK"]]
self.nextLang = 'pl_PL'
elif self.lang == 'pl_PL':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"y", u"u", u"i", u"o", u"p", u"-", u"["],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u";", u"'", u"\\"],
[u"<", u"z", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"/", u"ALL"],
[u"SHIFT", u"SPACE", u"ą", u"ć", u"ę", u"ł", u"ń", u"ó", u"ś", u"ź", u"ż", u"OK"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u"@", u"#", u"$", u"%", u"^", u"&", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Y", u"U", u"I", u"O", u"P", u"*", u"]"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"?", u'"', u"|"],
[u">", u"Z", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"Ą", u"Ć", u"Ę", u"Ł", u"Ń", u"Ó", u"Ś", u"Ź", u"Ż", u"OK"]]
self.nextLang = 'en_EN'
else:
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"y", u"u", u"i", u"o", u"p", u"-", u"["],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u";", u"'", u"\\"],
[u"<", u"z", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"/", u"ALL"],
[u"SHIFT", u"SPACE", u"OK", u"LEFT", u"RIGHT", u"*"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u"@", u"#", u"$", u"%", u"^", u"&", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Y", u"U", u"I", u"O", u"P", u"+", u"]"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"?", u'"', u"|"],
[u">", u"Z", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"OK", u"LEFT", u"RIGHT", u"~"]]
self.lang = 'en_EN'
self.nextLang = 'de_DE'
self["country"].setText(self.lang)
self.max_key=47+len(self.keys_list[4])
	def virtualKeyBoardEntryComponent(self, keys):
		"""Build one rendered keyboard row for the given key labels.

		Returns a list: the raw row data first, then one pixmap entry per
		key, followed by text entries for keys without a dedicated image.
		Text entries are collected separately and appended last so they are
		drawn on top of the key backgrounds.
		"""
		# Fall back to a 45px background width when no background pixmap is set.
		key_bg_width = self.key_bg and self.key_bg.size().width() or 45
		key_images = self.shiftMode and self.keyImagesShift or self.keyImages
		res = [(keys)]
		text = []
		x = 0
		for key in keys:
			png = key_images.get(key, None)
			if png:
				width = png.size().width()
				res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=png))
			else:
				width = key_bg_width
				res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=self.key_bg))
				text.append(MultiContentEntryText(pos=(x, 0), size=(width, 45), font=0, text=key.encode("utf-8"), flags=RT_HALIGN_CENTER | RT_VALIGN_CENTER))
			x += width
		return res + text
def buildVirtualKeyBoard(self):
self.previousSelectedKey = None
self.list = []
for keys in self.shiftMode and self.shiftkeys_list or self.keys_list:
self.list.append(self.virtualKeyBoardEntryComponent(keys))
self.markSelectedKey()
def markSelectedKey(self):
if self.previousSelectedKey is not None:
self.list[self.previousSelectedKey /12] = self.list[self.previousSelectedKey /12][:-1]
width = self.key_sel.size().width()
x = self.list[self.selectedKey/12][self.selectedKey % 12 + 1][1]
self.list[self.selectedKey / 12].append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=self.key_sel))
self.previousSelectedKey = self.selectedKey
self["list"].setList(self.list)
	def backClicked(self):
		# Backspace action: delegate to the input element.
		self["text"].deleteBackward()
	def forwardClicked(self):
		# Delete action: delegate to the input element.
		self["text"].deleteForward()
	def shiftClicked(self):
		"""Toggle shift mode and re-render the keyboard layout."""
		self.smsChar = None
		self.shiftMode = not self.shiftMode
		self.buildVirtualKeyBoard()
def okClicked(self):
self.smsChar = None
text = (self.shiftMode and self.shiftkeys_list or self.keys_list)[self.selectedKey / 12][self.selectedKey % 12].encode("UTF-8")
if text == "EXIT":
self.close(None)
elif text == "BACKSPACE":
self["text"].deleteBackward()
elif text == "ALL":
self["text"].markAll()
elif text == "CLEAR":
self["text"].deleteAllChars()
self["text"].update()
elif text == "SHIFT":
self.shiftClicked()
elif text == "SPACE":
self["text"].char(" ".encode("UTF-8"))
elif text == "OK":
self.close(self["text"].getText().encode("UTF-8"))
elif text == "LEFT":
self["text"].left()
elif text == "RIGHT":
self["text"].right()
else:
self["text"].char(text)
	def ok(self):
		# Confirm: close the dialog returning the entered text (UTF-8).
		self.close(self["text"].getText().encode("UTF-8"))
	def exit(self):
		# Cancel: close the dialog without a result.
		self.close(None)
	def cursorRight(self):
		# Move the text cursor one position to the right.
		self["text"].right()
	def cursorLeft(self):
		# Move the text cursor one position to the left.
		self["text"].left()
def left(self):
self.smsChar = None
self.selectedKey = self.selectedKey / 12 * 12 + (self.selectedKey + 11) % 12
if self.selectedKey > self.max_key:
self.selectedKey = self.max_key
self.markSelectedKey()
def right(self):
self.smsChar = None
self.selectedKey = self.selectedKey / 12 * 12 + (self.selectedKey + 1) % 12
if self.selectedKey > self.max_key:
self.selectedKey = self.selectedKey / 12 * 12
self.markSelectedKey()
def up(self):
self.smsChar = None
self.selectedKey -= 12
if self.selectedKey < 0:
self.selectedKey = self.max_key / 12 * 12 + self.selectedKey % 12
if self.selectedKey > self.max_key:
self.selectedKey -= 12
self.markSelectedKey()
def down(self):
self.smsChar = None
self.selectedKey += 12
if self.selectedKey > self.max_key:
self.selectedKey = self.selectedKey % 12
self.markSelectedKey()
	def keyNumberGlobal(self, number):
		"""Handle a numeric key via SMS-style multi-tap character input."""
		self.smsChar = self.sms.getKey(number)
		self.selectAsciiKey(self.smsChar)
def smsOK(self):
if self.smsChar and self.selectAsciiKey(self.smsChar):
print "pressing ok now"
self.okClicked()
	def keyGotAscii(self):
		"""Handle a character typed on a real (ascii) keyboard."""
		self.smsChar = None
		# NOTE(review): Python 2 idiom (unichr); getPrevAsciiCode presumably
		# returns the code of the key press that triggered this action --
		# confirm against the enigma2 input layer.
		if self.selectAsciiKey(str(unichr(getPrevAsciiCode()).encode('utf-8'))):
			self.okClicked()
	def selectAsciiKey(self, char):
		"""Move the selection to the key labelled `char`, if it exists.

		Searches the shifted layout first, then the plain one; toggles
		shift mode (and rebuilds the keyboard) when the match lives in the
		layout that is not currently active.
		Returns True when the character was found, False otherwise.
		"""
		if char == " ":
			char = "SPACE"
		for keyslist in (self.shiftkeys_list, self.keys_list):
			selkey = 0
			for keys in keyslist:
				for key in keys:
					if key == char:
						self.selectedKey = selkey
						# Flip shift mode if the match is in the other layout.
						if self.shiftMode != (keyslist is self.shiftkeys_list):
							self.shiftMode = not self.shiftMode
							self.buildVirtualKeyBoard()
						else:
							self.markSelectedKey()
						return True
					selkey += 1
		return False
| gpl-2.0 |
bailey-lab/bibseq | scripts/setUpScripts/generateCompFile.py | 8 | 1633 | #!/usr/bin/env python3
import shutil, os, argparse, sys, stat
from genFuncs import genHelper
def parse_args():
	"""Build and evaluate the command line interface.

	Every flag is an optional string except -outFilename (required) and
	-private (boolean switch).
	"""
	cli = argparse.ArgumentParser()
	for flag in ('-CC', '-CXX', '-outname'):
		cli.add_argument(flag, type=str)
	cli.add_argument('-outFilename', type=str, required = True)
	for flag in ('-externalLoc', '-prefix', '-installName',
	             '-neededLibs', '-ldFlags', '-cxxFlags'):
		cli.add_argument(flag, type=str)
	cli.add_argument('-private', action = "store_true", help="Use private repos")
	return cli.parse_args()
def main():
	"""Read the CLI options, fill in defaults and delegate to genHelper."""
	args = parse_args()
	cc = genHelper.determineCC(args)
	cxx = genHelper.determineCXX(args)
	# Every optional argument falls back to its documented default.
	external = args.externalLoc if args.externalLoc else "external"
	outname = args.outname if args.outname else "out"
	installName = args.installName if args.installName else "out"
	prefix = args.prefix if args.prefix else "./"
	neededLibs = args.neededLibs.split(",") if args.neededLibs else "none"
	ldFlags = args.ldFlags if args.ldFlags else ""
	cxxFlags = args.cxxFlags if args.cxxFlags else ""
	genHelper.generateCompfileFull(args.outFilename, external, cc, cxx,
	                               outname, installName, prefix, neededLibs,
	                               ldFlags, cxxFlags, args.private)
main()
| gpl-3.0 |
tebriel/dd-agent | tests/checks/mock/test_mesos_slave.py | 45 | 1565 | # stdlib
import json
# 3p
from mock import patch
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest, Fixtures, get_check_class
def _mocked_get_state(*args, **kwargs):
    """Stand-in for the check's _get_state: return the fixture JSON."""
    return json.loads(Fixtures.read_file('state.json'))
def _mocked_get_stats(*args, **kwargs):
    """Stand-in for the check's _get_stats: return the fixture JSON."""
    return json.loads(Fixtures.read_file('stats.json'))
@attr(requires='mesos_slave')
class TestMesosSlave(AgentCheckTest):
    """Run the mesos_slave check against fixture data with HTTP mocked out."""
    CHECK_NAME = 'mesos_slave'

    def test_checks(self):
        """Run the check twice and assert every declared metric was emitted."""
        config = {
            'init_config': {},
            'instances': [
                {
                    'url': 'http://localhost:5050',
                    'tasks': ['hello']
                }
            ]
        }

        klass = get_check_class('mesos_slave')
        with patch.object(klass, '_get_state', _mocked_get_state):
            with patch.object(klass, '_get_stats', _mocked_get_stats):
                check = klass('mesos_slave', {}, {})
                self.run_check_twice(config)
                metrics = {}
                for d in (check.SLAVE_TASKS_METRICS, check.SYSTEM_METRICS, check.SLAVE_RESOURCE_METRICS,
                          check.SLAVE_EXECUTORS_METRICS, check.STATS_METRICS):
                    metrics.update(d)
                # Plain loops instead of side-effect list comprehensions, and
                # .values() instead of the Python-2-only .iteritems().
                for v in check.TASK_METRICS.values():
                    self.assertMetric(v[0])
                for v in metrics.values():
                    self.assertMetric(v[0])
                self.assertServiceCheck('hello.ok', count=1, status=AgentCheck.OK)
| bsd-3-clause |
Panos512/invenio | modules/bibsword/lib/bibsword_client_templates.py | 37 | 41746 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
BibSWORD Client Templates
'''
from invenio.config import CFG_SITE_URL, CFG_SITE_NAME, CFG_SITE_RECORD
class BibSwordTemplate:
'''
This class contains attributes and methods that allows to display all
information used by the BibSword web user interface. Theses informations
are form, validation or error messages
'''
    def __init__(self):
        ''' No state to initialise: this class only renders HTML templates '''
#---------------------------------------------------------------------------
# BibSword WebSubmit Interface
#---------------------------------------------------------------------------
def tmpl_display_submit_ack(self, remote_id, link):
'''
This method generate the html code that displays the acknoledgement
message after the submission of a record.
@param remote_id: id of the record given by arXiv
@param link: links to modify or consult submission
@return: string containing the html code
'''
html = ''
html += '''<h1>Success !</h1>'''
html += '''<p>The record has been successfully pushed to arXiv ! <br />''' \
'''You will get an email once it will be accepted by ''' \
'''arXiv moderator.</p>'''
html += '''<p>The arXiv id of the submission is: <b>%s</b></p>''' % \
remote_id
html += '''<p><a href="www.arxiv.org/user">Manage your submission</a></p>'''
return html
#---------------------------------------------------------------------------
# BibSword Administrator Interface
#---------------------------------------------------------------------------
def tmpl_display_admin_page(self, submissions, first_row, last_row,
total_rows, is_prev, is_last, offset,
error_messages=None):
'''
format the html code that display the submission table
@param submissions: list of all submissions and their status
@return: html code to be displayed
'''
if error_messages == None:
error_messages = []
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
%(error_message)s
<input type="hidden" name="status" value="display_submission"/>
<input type="hidden" name="first_row" value="%(first_row)s"/>
<input type="hidden" name="last_row" value="%(last_row)s"/>
<input type="hidden" name="total_rows" value="%(total_rows)s" />
<input type="submit" name="submit" value="New submission"/><br/>
<br />
<input type="submit" name="submit" value="Refresh all"/><br/>
<br />
Display
<select name="offset">
<option value="5" %(selected_1)s>5</option>
<option value="10" %(selected_2)s>10</option>
<option value="25" %(selected_3)s>25</option>
<option value="50" %(selected_4)s>50</option>
<option value=%(total_rows)s %(selected_5)s>all</option>
</select>
rows per page <input type="submit" name="submit" value="Select" /><br />
<br />
<input type="submit" name="submit" value="First" %(is_prev)s/>
<input type="submit" name="submit" value="Prev" %(is_prev)s/>
Pages %(first_row)s - %(last_row)s / %(total_rows)s
<input type="submit" name="submit" value="Next" %(is_last)s/>
<input type="submit" name="submit" value="Last" %(is_last)s/><br/>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="7" bgcolor="#e6e6fa">
<h2>Submission state</h2>
</td>
</tr>
<tr>
<td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Record number</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
</tr>
%(submissions)s
</table>
</form>''' % {
'error_message': \
self.display_error_message_row(error_messages),
'table_width' : '100%',
'first_row' : first_row,
'last_row' : last_row,
'total_rows' : total_rows,
'is_prev' : is_prev,
'is_last' : is_last,
'selected_1' : offset[0],
'selected_2' : offset[1],
'selected_3' : offset[2],
'selected_4' : offset[3],
'selected_5' : offset[4],
'submissions' : self.fill_submission_table(submissions)
}
return body
    def tmpl_display_remote_server_info(self, server_info):
        '''
        Display a table containing all server informations
        @param server_info: dict with keys server_id, server_name,
            server_host, username, password, email, realm,
            url_base_record and url_servicedocument
        @return: html code for the table containing infos
        '''
        body = '''<table width="%(table_width)s">\n''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">ID</td>\n''' \
               ''' <td>%(server_id)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Name</td>\n''' \
               ''' <td>%(server_name)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Host</td>\n''' \
               ''' <td>%(server_host)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Username</td>\n''' \
               ''' <td>%(username)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Password</td>\n''' \
               ''' <td>%(password)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Email</td>\n''' \
               ''' <td>%(email)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Realm</td>\n''' \
               ''' <td>%(realm)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">Record URL</td>\n''' \
               ''' <td>%(url_base_record)s</td>\n''' \
               ''' </tr>\n ''' \
               ''' <tr>\n''' \
               ''' <td bgcolor="#e6e6fa">URL Servicedocument</td>\n'''\
               ''' <td>%(url_servicedocument)s</td>\n''' \
               ''' </tr>\n ''' \
               '''</table>''' % {
                   'table_width' : '50%',
                   'server_id' : server_info['server_id'],
                   'server_name' : server_info['server_name'],
                   'server_host' : server_info['server_host'],
                   'username' : server_info['username'],
                   'password' : server_info['password'],
                   'email' : server_info['email'],
                   'realm' : server_info['realm'],
                   'url_base_record' : server_info['url_base_record'],
                   'url_servicedocument': server_info['url_servicedocument']
               }
        return body
    def tmpl_display_remote_servers(self, remote_servers, id_record,
                                    error_messages):
        '''
        format the html code that display a dropdown list containing the
        servers
        @param self: reference to the current instance of the class
        @param remote_servers: list of tuple containing server's infos
        @param id_record: report number preset in the text field
        @param error_messages: list of error strings shown above the form
        @return: string containing html code
        '''
        body = '''
        <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
        <input type="hidden" name="status" value="select_server"/>
        %(error_message)s
        <input type="submit" name="submit" value="Cancel" />
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa">
                    <h2>Forward a record</h2>
                </td>
            </tr>
            <tr>
                <td align="right" width="%(row_width)s">
                    <p>Enter the number of the report to submit: </p>
                </td>
                <td align="left" width="%(row_width)s">
                    <input type="text" name="id_record" size="20"
                           value="%(id_record)s"/>
                </td>
            </tr>
            <tr>
                <td align="right" width="%(row_width)s">
                    <p>Select a remote server: </p>
                </td>
                <td align="left" width="%(row_width)s">
                    <select name="id_remote_server" size="1">
                        <option value="0">-- select a remote server --</option>
                        %(remote_server)s
                    </select>
                </td>
            </tr>
            <tr>
                <td colspan="2" align="center">
                    <input type="submit" value="Select" name="submit"/>
                </td>
            </tr>
        </table>
        </form>''' % {
                'error_message': \
                    self.display_error_message_row(error_messages),
                'table_width' : '100%',
                'row_width' : '50%',
                'id_record' : id_record,
                'remote_server': \
                    self.fill_dropdown_remote_servers(remote_servers)
             }
        return body
    def tmpl_display_collections(self, selected_server, server_infos,
                                 collections, id_record, recid, error_messages):
        '''
        format the html code that display the selected server, the informations
        about the selected server and a dropdown list containing the server's
        collections
        @param self: reference to the current instance of the class
        @param selected_server: tuple containing selected server name and id
        @param server_infos: tuple containing infos about selected server
        @param collections: list contianing server's collections
        @param id_record: report number carried through as hidden input
        @param recid: record id carried through as hidden input
        @param error_messages: list of error strings shown above the form
        @return: string containing html code
        '''
        body = '''
        <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
        <input type="hidden" name="status" value="select_collection"/>
        <input type="hidden" name="id_remote_server" value="%(id_server)s"/>
        <input type="hidden" name="id_record" value="%(id_record)s"/>
        <input type="hidden" name="recid" value="%(recid)s"/>
        %(error_message)s
        <input type="submit" name="submit" value="Cancel" />
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa">
                    <h2>Remote server</h2></td>
            </tr>
            <tr>
                <td align="center" rowspan="2" valign="center">
                    <h2>%(server_name)s</h2>
                </td>
                <td align="left">
                    SWORD version: %(server_version)s
                </td>
            </tr>
            <tr>
                <td align="left">
                    Max upload size [Kb]: %(server_maxUpload)s
                </td>
            </tr>
            <tr>
                <td align="left" colspan="2">
                    <input type="submit" value="Modify server" name="submit"/>
                </td>
            </tr>
        </table>
        <p>&nbsp;</p>
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa"><h2>Collection</h2>
                </td>
            </tr>
            <tr>
                <td align="right" width="%(row_width)s">Select a collection: </td>
                <td align="left" width="%(row_width)s">
                    <select name="id_collection" size="1">
                        <option value="0">-- select a collection --</option>
                        %(collection)s
                    </select>
                </td>
            </tr>
            <tr>
                <td align="center" colspan="2">
                    <input type="submit" value="Select" name="submit"/>
                </td>
            </tr>
        </table>
        </form>''' % {
                'table_width' : '100%',
                'row_width' : '50%',
                'error_message' : \
                    self.display_error_message_row(error_messages),
                'id_server' : selected_server['id'],
                'server_name' : selected_server['name'],
                'server_version' : server_infos['version'],
                'server_maxUpload': server_infos['maxUploadSize'],
                'collection' : \
                    self.fill_dropdown_collections(collections),
                'id_record' : id_record,
                'recid' : recid
            }
        return body
    def tmpl_display_categories(self, selected_server, server_infos,
                                selected_collection, collection_infos,
                                primary_categories, secondary_categories,
                                id_record, recid, error_messages):
        '''
        format the html code that display the selected server, the informations
        about the selected server, the selected collections, the informations
        about the collection and a dropdown list containing the server's
        primary and secondary categories
        @param self: reference to the current instance of the class
        @param selected_server: tuple containing selected server name and id
        @param server_infos: tuple containing infos about selected server
        @param selected_collection: selected collection
        @param collection_infos: tuple containing infos about selected col
        @param primary_categories: list of mandated categories for the col
        @param secondary_categories: list of optional categories for the col
        @param id_record: report number carried through as hidden input
        @param recid: record id carried through as hidden input
        @param error_messages: list of error strings shown above the form
        @return: string containing html code
        '''
        body = '''
        <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
        <input type="hidden" name="status" value="select_primary_category"/>
        <input type="hidden" name="id_remote_server" value="%(id_server)s"/>
        <input type="hidden" name="id_collection" value="%(id_collection)s"/>
        <input type="hidden" name="id_record" value="%(id_record)s"/>
        <input type="hidden" name="recid" value="%(recid)s"/>
        %(error_message)s
        <input type="submit" name="submit" value="Cancel" />
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa">
                    <h2>Remote server</h2>
                </td>
            </tr>
            <tr>
                <td align="center" rowspan="2" valign="center">
                    <h2>%(server_name)s</h2>
                </td>
                <td align="left">
                    SWORD version: %(server_version)s
                </td>
            </tr>
            <tr>
                <td align="left">
                    Max upload size [Kb]: %(server_maxUpload)s
                </td>
            </tr>
            <tr>
                <td align="left" colspan="2">
                    <input type="submit" value="Modify server" name="submit"/>
                </td>
            </tr>
        </table>
        <p>&nbsp;</p>
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa">
                    <h2>Collection</h2>
                </td>
            </tr>
            <tr>
                <td align="center" rowspan="2" valign="center">
                    <h2>%(collection_name)s</h2>
                </td>
                <td align="left">
                    URL: %(collection_url)s
                </td>
            </tr>
            <tr>
                <td align="left">
                    Accepted media types:
                    <ul>%(collection_accept)s</ul>
                </td>
            </tr>
            <tr>
                <td align="left" colspan=2>
                    <input type="submit" value="Modify collection" name="submit"/>
                </td>
            </tr>
        </table>
        <p>&nbsp;</p>
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa">
                    <h2>Mandatory category</h2>
                </td>
            </tr>
            <tr>
                <td align="right" width="%(row_width)s">
                    <p>Select a mandated category: </p>
                </td>
                <td align="left" width="%(row_width)s">
                    <select name="id_primary" size="1">
                        <option value="0">-- select a category --</option>
                        %(primary_categories)s
                    </select>
                </td>
            </tr>
        </table>
        <p></p>
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="2" bgcolor="#e6e6fa">
                    <h2>Optional categories</h2>
                </td>
            </tr>
                <td align="right" width="%(row_width)s">
                    <p>Select optional categories: </p>
                </td>
                <td align="left" width="%(row_width)s">
                    <select name="id_categories" size="10" multiple>
                        %(secondary_categories)s
                    </select>
                </td>
            </tr>
        </table>
        <p>&nbsp;</p>
        <center>
            <input type="submit" value="Select" name="submit"/>
        </center>
        </form>''' % {
                'table_width' : '100%',
                'row_width' : '50%',
                'error_message' : self.display_error_message_row(
                                                    error_messages),
                # hidden input
                'id_server' : selected_server['id'],
                'id_collection' : selected_collection['id'],
                'id_record' : id_record,
                'recid' : recid,
                # variables values
                'server_name' : selected_server['name'],
                'server_version' : server_infos['version'],
                'server_maxUpload' : server_infos['maxUploadSize'],
                'collection_name' : selected_collection['label'],
                'collection_accept': ''.join([
                   '''<li>%(name)s </li>''' % {
                       'name': accept
                   } for accept in collection_infos['accept'] ]),
                'collection_url' : selected_collection['url'],
                'primary_categories' : self.fill_dropdown_primary(
                                                    primary_categories),
                'secondary_categories': self.fill_dropdown_secondary(
                                                    secondary_categories)
            }
        return body
    def tmpl_display_metadata(self, user, server, collection, primary,
                              categories, medias, metadata, id_record, recid,
                              error_messages):
        '''
        format a string containing every informations before a submission
        @param user: dict with 'nickname' and 'email' of the submitter
        @param server: dict describing the selected remote server
        @param collection: dict describing the selected collection
        @param primary: dict describing the mandatory category
        @param categories: list of optional category dicts
        @param medias: list of media file descriptions
        @param metadata: dict with id, title, summary, contributors,
            journal_refs and report_nos of the record
        @param id_record: report number carried through as hidden input
        @param recid: record id carried through as hidden input
        @param error_messages: list of error strings shown above the form
        @return: string containing html code
        '''
        body = '''
        <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
            <input type="hidden" name="status" value="check_submission"/>
            <input type="hidden" name="id_remote_server" value="%(id_server)s"/>
            <input type="hidden" name="id_collection" value="%(id_collection)s"/>
            <input type="hidden" name="id_primary" value="%(id_primary)s"/>
            <input type="hidden" name="id_categories" value="%(id_categories)s"/>
            <input type="hidden" name="id_record" value="%(id_record)s"/>
            <input type="hidden" name="recid" value="%(recid)s"/>
            %(error_message)s
            <input type="submit" name="submit" value="Cancel" />
            <table border="1" valign="top" width="%(table_width)s">
                <tr>
                    <td align="left" colspan="2" bgcolor="#e6e6fa">
                        <h2>Destination</h2>
                    </td>
                </tr>
                <tr>
                    <td align="center" rowspan="3" valign="center">
                        <h2>%(server_name)s</h2>
                    </td>
                    <td align="left">
                        Collection: %(collection_name)s ( %(collection_url)s )
                    </td>
                </tr>
                <tr>
                    <td align="left">
                        Primary category: %(primary_name)s ( %(primary_url)s )
                    </td>
                </tr>
                %(categories)s
                <tr>
                    <td align="left" colspan="2">
                        <input type="submit" value="Modify destination" name="submit"/>
                    </td>
                </tr>
            </table>
            <p>&nbsp;</p>
            <table border="1" valign="top" width="%(table_width)s">
                <tr>
                    <td align="left" colspan="4" bgcolor="#e6e6fa">
                        <h2>Submitter</h2>
                    </td>
                </tr>
                <tr>
                    <td width="%(row_width)s">Name:</td>
                    <td><input type="text" name="author_name" size="100"
                        value="%(user_name)s"/></td>
                </tr>
                <tr>
                    <td>Email:</td>
                    <td><input type="text" name="author_email" size="100"
                        value="%(user_email)s"/></td>
                </tr>
            </table>
            <p></p>
            <table border="1" valign="top" width="%(table_width)s">
                <tr>
                    <td align="left" colspan="4" bgcolor="#e6e6fa"><h2>Media</h2></td>
                </tr>
                <tr><td colspan="4">%(medias)s%(media_help)s</td></tr>
            </table>
            <p></p>
            <table border="1" valign="top" width="%(table_width)s">
                <tr>
                    <td align="left" colspan="3" bgcolor="#e6e6fa"><h2>Metadata</h2> <font color="red"><b>Warning:</b> modification(s) will not be saved on the %(CFG_SITE_NAME)s</font>
                    </td>
                </tr>
                <tr>
                    <td align="left" width="%(row_width)s"><p>Report Number<span style="color:#f00">*</span>:</p></td>
                    <td><input type="text" name="id" size="100" value="%(id)s"/></td>
                </tr>
                <tr>
                    <td align="left" width="%(row_width)s"><p>Title<span style="color:#f00">*</span>:</p></td>
                    <td><input type="text" name="title" size="100" value="%(title)s"/>
                    </td>
                </tr>
                <tr>
                    <td align="left" width="%(row_width)s"><p>Summary<span style="color:#f00">*</span>:</p></td>
                    <td>
                        <textarea name="summary" rows="4" cols="100">%(summary)s
                        </textarea>
                    </td>
                </tr>
                %(contributors)s
                %(journal_refs)s
                %(report_nos)s
            </table>
            <p><font color="red">The fields having a * are mandatory</font></p>
            <center>
                <input type="submit" value="Submit" name="submit"/>
            </center>
        <form>''' % {
                'table_width' : '100%',
                'row_width' : '25%',
                'error_message' : \
                    self.display_error_message_row(error_messages),
                'CFG_SITE_NAME': CFG_SITE_NAME,
                # hidden input
                'id_server' : server['id'],
                'id_collection' : collection['id'],
                'id_primary' : primary['id'],
                'id_categories' : self.get_list_id_categories(categories),
                'id_record' : id_record,
                'recid' : recid,
                # variables values
                'server_name' : server['name'],
                'collection_name' : collection['label'],
                'collection_url' : collection['url'],
                'primary_name' : primary['label'],
                'primary_url' : primary['url'],
                'categories' : self.fill_optional_category_list(categories),
                #user
                'user_name' : user['nickname'],
                'user_email' : user['email'],
                # media
                'medias' : self.fill_media_list(medias, server['id']),
                'media_help' : self.fill_arxiv_help_message(),
                # metadata
                'id' : metadata['id'],
                'title' : metadata['title'],
                'summary' : metadata['summary'],
                'contributors' : self.fill_contributors_list(
                                                metadata['contributors']),
                'journal_refs' : self.fill_journal_refs_list(
                                                metadata['journal_refs']),
                'report_nos' : self.fill_report_nos_list(
                                                metadata['report_nos'])
            }
        return body
    def tmpl_display_list_submission(self, submissions):
        '''
        Display the data of submitted recods
        @param submissions: list of submission dicts (see
            fill_submission_table for the expected keys)
        @return: html code of the confirmation table
        '''
        body = '''
        <form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
        <table border="1" valign="top" width="%(table_width)s">
            <tr>
                <td align="left" colspan="7" bgcolor="#e6e6fa">
                    <h2>Document successfully submitted !</h2>
                </td>
            </tr>
            <tr>
                <td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Record id</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
                <td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
            </tr>
            %(submissions)s
        </table>
        <a href=%(CFG_SITE_URL)s/bibsword>Return</a>
        </form>''' % {
                'table_width' : '100%',
                'submissions' : self.fill_submission_table(submissions),
                'CFG_SITE_URL' : CFG_SITE_URL
             }
        return body
#***************************************************************************
# Private functions
#***************************************************************************
def display_error_message_row(self, error_messages):
'''
return a list of error_message in form of a bullet list
@param error_messages: list of error_messages to display
@return: html code that display list of errors
'''
# if no errors, return nothing
if len(error_messages) == 0:
return ''
if len(error_messages) == 1:
# display a generic header message
body = '''
<tr>
<td align="left" colspan=2>
<font color='red'>
<p> The following error was found: </p>
<ul>
'''
else:
# display a generic header message
body = '''
<tr>
<td align="left" colspan=2>
<font color='red'>
<p> Following errors were found: </p>
<ul>
'''
# insert each error lines
for error_message in error_messages:
body = body + '''
<li>%(error)s</li>''' % {
'error': error_message
}
body = body + '''
</ul>
</font>
</td>
</tr>'''
return body
def fill_submission_table(self, submissions):
'''
This method return the body of the submission state table. each
submissions given in parameters has one row
@param submissions: submission status list
@return: html table body
'''
return ''.join([
''' <tr>
<td>%(id_server)s: <a href="%(server_infos)s">
%(server_name)s</a></td>
<td>%(user_name)s <br/> %(user_email)s</td
<td>%(id_bibrec)s: <a href="%(cfg_site_url)s/%(CFG_SITE_RECORD)s/%(id_bibrec)s"
target="_blank">%(no_bibrec)s</a></td>
<td><a href="%(url_base_remote)s/%(id_remote)s" target="_blank">
%(id_remote)s</a></td>
<td>%(status)s</td>
<td><b>submission: </b> %(submission_date)s <br/>
<b>publication: </b> %(publication_date)s <br/>
<b>removal: </b> %(removal_date)s </td>
<td><b>media: </b> <a href="%(media_link)s" target="_blank">
%(media_link)s</a> <br/>
<b>metadata: </b> <a href="%(metadata_link)s" target="_blank">
%(metadata_link)s</a> <br />
<b>status: </b> <a href="%(status_link)s" target="_blank">
%(status_link)s</a></td>
</tr>''' % {
'id_server' : str(submission['id_server']),
'server_infos' : "%s/bibsword/remoteserverinfos?id=%s" % \
(CFG_SITE_URL, submission['id_server']),
'server_name' : str(submission['server_name']),
'user_name' : str(submission['user_name']),
'user_email' : str(submission['user_email']),
'id_bibrec' : str(submission['id_record']),
'no_bibrec' : str(submission['report_no']),
'id_remote' : str(submission['id_remote']),
'status' : str(submission['status']),
'submission_date' : str(submission['submission_date']),
'publication_date' : str(submission['publication_date']),
'removal_date' : str(submission['removal_date']),
'media_link' : str(submission['link_medias']),
'metadata_link' : str(submission['link_metadata']),
'status_link' : str(submission['link_status']),
'url_base_remote' : str(submission['url_base_remote']),
'cfg_site_url' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD
} for submission in submissions])
def fill_dropdown_remote_servers(self, remote_servers):
'''
This method fill a dropdown list of remote servers.
@return: html code to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s - %(host)s</option>''' % {
'id': str(remote_server['id']),
'name': remote_server['name'],
'host': remote_server['host']
} for remote_server in remote_servers])
def fill_dropdown_collections(self, collections):
'''
This method fill a dropdown list of collection.
@param collections: list of all collections with name - url
@return: html code to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': str(collection['id']),
'name': collection['label']
} for collection in collections])
def fill_dropdown_primary(self, primary_categories):
'''
This method fill the primary dropdown list with the data given in
parameter
@param primary_categories: list of 'url' 'name' tuples
@return: html code generated to display the list
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': primary_categorie['id'],
'name': primary_categorie['label']
} for primary_categorie in primary_categories])
def fill_dropdown_secondary(self, categories):
'''
This method fill a category list. This list is allows the multi-selection
or items. To proced to select more than one categorie through a browser
ctrl + clic
@param categories: list of all categories in the format name - url
@return: the html code that display each dropdown list
'''
if len(categories) == '':
return ''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': category['id'],
'name': category['label']
} for category in categories])
def fill_optional_category_list(self, categories):
'''
This method fill a table row that contains name and url of the selected
optional categories
@param self: reference to the current instance of the class
@param categories: list of tuples containing selected categories
@return: html code generated to display the list
'''
if len(categories) == 0:
return ''
else:
body = '<tr><td>'
body = body + ''.join([
'''<p>Category: %(category_name)s ( %(category_url)s )</p>'''%{
'category_name' : category['label'],
'category_url' : category['url']
} for category in categories
])
body = body + '</td></tr>'
return body
    def fill_media_list(self, medias, id_server, from_websubmit=False):
        '''
        Concatenate the string that contains all informations about the medias
        @param medias: list of media descriptions, grouped by type through
            self.format_media_list_by_type (each entry carries 'path',
            'selected' and 'name')
        @param id_server: id of the remote server; the checkbox list is only
            rendered for server 1 (arXiv)
        @param from_websubmit: when True, the extra file-upload input is
            omitted
        @return: html code of the media selection section
        '''
        text = ''
        if id_server == 1:
            media_type = self.format_media_list_by_type(medias)
            text = '''<h2>Please select files you would like to push to arXiv:</h2>'''
            for mtype in media_type:
                text += '''<h3><b>%s: </b></h3>''' % mtype['media_type']
                text += '''<blockquote>'''
                for media in mtype['media_list']:
                    text += '''<input type='checkbox' name="media" value="%s" %s>%s</input><br />''' % (media['path'], media['selected'], media['name'])
                text += "</blockquote>"
            text += '''<h3>Upload</h3>'''
            text += '''<blockquote>'''
            text += '''<p>In addition, you can submit a new file (that will be added to the record as well):</p>'''
        if from_websubmit == False:
            text += '''<input type="file" name="new_media" size="60"/>'''
        return text
def fill_arxiv_help_message(self):
text = '''</blockquote><h3>Help</h3>'''
text += '''<blockquote><p>For more help on which formats are supported by arXiv, please see:'''\
'''<ul>'''\
'''<li><a href="http://arxiv.org/help/submit" target="_blank">'''\
'''arXiv submission process</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_tex" target="_blank">'''\
'''arXiv TeX submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_docx" target="_blank">'''\
'''arXiv Docx submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_pdf" target="_blank">'''\
'''arXiv PDF submission</a></li>'''\
'''</ul></blockquote>'''
return text
    def fill_contributors_list(self, contributors):
        '''
        Render every contributor as editable input fields (name, email and one
        row per affiliation) so the user can modify them. The first entry is
        labelled "Author", every following one "Contributor".

        @param self: reference to the current instance of the class
        @param contributors: list of dicts with 'name', 'email' and
                             'affiliation' (list of strings) keys
        @return: the html table rows for all contributors
        '''
        output = ''
        is_author = True  # only the very first contributor is the author
        for author in contributors:
            # name row + email row; affiliation rows are counted below
            nb_rows = 2
            author_name = \
                '''<LABEL for="name">Name: </LABEL><input type = "text" ''' \
                '''name = "contributor_name" size = "100" value = "%s" ''' \
                '''id="name"/>''' % author['name']
            author_email = \
                '''<LABEL for = "email">Email: </LABEL>''' \
                '''<input type = "text" name = "contributor_email" ''' \
                '''size = "100" value = "%s" id = "email"/>''' % author['email']
            author_affiliations = []
            # one pre-filled input per existing affiliation
            for affiliation in author['affiliation']:
                affiliation_row = \
                    '''<LABEL for = "affiliation">Affiliation: </LABEL> ''' \
                    '''<input type="text" name = "contributor_affiliation" ''' \
                    '''size = "100" value = "%s" id = "affiliation"/>''' % \
                    affiliation
                author_affiliations.append(affiliation_row)
                nb_rows = nb_rows + 1
            # plus one empty input so a new affiliation can be added
            affiliation_row = \
                '''<LABEL for = "affiliation">Affiliation: </LABEL>''' \
                '''<input type = "text" name = "contributor_affiliation" ''' \
                '''size = "100" id = "affiliation"/>'''
            author_affiliations.append(affiliation_row)
            nb_rows = nb_rows + 1
            # rowspan makes the Author/Contributor label span all input rows
            if is_author:
                output += '''<tr><td rowspan = "%s">Author: </td>''' % nb_rows
                is_author = False
            else:
                output += '''<tr><td rowspan = "%s">Contributor: </td>''' % \
                    nb_rows
            output += '''<td>%s</td></tr>''' % author_name
            # NOTE(review): author_email is a generated html string and can
            # never be '', so this guard is always True — presumably it was
            # meant to test author['email']; confirm before changing.
            if author_email != '':
                output += '''<tr><td>%s</td></tr>''' % author_email
            for affiliation in author_affiliations:
                output += '''<tr><td>%s</td></tr>''' % affiliation
            # hidden sentinel marking the end of this contributor's
            # affiliations when the form is submitted
            output += \
                '''<input type = "hidden" name = "contributor_affiliation" ''' \
                '''value = "next"/>'''
        return output
def fill_journal_refs_list(self, journal_refs):
'''
This method display each journal references in the format of an editable
input text. This allows the user to modifie it.
@param journal_refs: The list of all journal references of the document
@return: the html code that display each dropdown list
'''
html = ''
if len(journal_refs) > 0:
html += '''
<tr>
<td align="left"><p>Journal references: </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="journal_refs" size="100" ''' \
'''value="%(journal_ref)s"/></p>
''' % {
'journal_ref': journal_ref
} for journal_ref in journal_refs
])
html = html + '''
</td>
</tr>
'''
return html
def fill_report_nos_list(self, report_nos):
'''
Concatate a string containing the report number html table rows
'''
html = ''
if len(report_nos) > 0:
html = '''
<tr>
<td align="left"><p>Report numbers: </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="report_nos" size="100" ''' \
'''value="%(report_no)s"/></p>''' % {
'report_no': report_no
} for report_no in report_nos
])
html = html + '''
</td>
</tr>
'''
return html
def get_list_id_categories(self, categories):
'''
gives the id of the categores tuple
'''
id_categories = []
for category in categories:
id_categories.append(category['id'])
return id_categories
def format_media_list_by_type(self, medias):
'''
This function format the media by type (Main, Uploaded, ...)
'''
#format media list by type of document
media_type = []
for media in medias:
# if it is the first media of this type, create a new type
is_type_in_media_type = False
for type in media_type:
if media['collection'] == type['media_type']:
is_type_in_media_type = True
if is_type_in_media_type == False:
type = {}
type['media_type'] = media['collection']
type['media_list'] = []
media_type.append(type)
# insert the media in the good media_type element
for type in media_type:
if type['media_type'] == media['collection']:
type['media_list'].append(media)
return media_type
| gpl-2.0 |
philippze/django-cms | cms/tests/test_templatetags.py | 17 | 20005 | from __future__ import with_statement
from copy import deepcopy
import os
from classytags.tests import DummyParser, DummyTokens
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.template import RequestContext, Context
from django.test import RequestFactory, TestCase
from django.template.base import Template
from django.utils.html import escape
from django.utils.timezone import now
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cms.api import create_page, create_title, add_plugin
from cms.middleware.toolbar import ToolbarMiddleware
from cms.models.pagemodel import Page, Placeholder
from cms.templatetags.cms_tags import (_get_page_by_untyped_arg,
_show_placeholder_for_page,
_get_placeholder, RenderPlugin)
from cms.templatetags.cms_js_tags import json_filter
from cms.test_utils.fixtures.templatetags import TwoPagesFixture
from cms.test_utils.testcases import CMSTestCase
from cms.toolbar.toolbar import CMSToolbar
from cms.utils import get_cms_setting, get_site_id
from cms.utils.compat import DJANGO_1_7
from cms.utils.placeholder import get_placeholders
from sekizai.context import SekizaiContext
class TemplatetagTests(TestCase):
    """Unit tests for cms template tag helpers that need no database fixtures."""

    def test_get_site_id_from_nothing(self):
        # With no site given, get_site_id() falls back to settings.SITE_ID.
        with self.settings(SITE_ID=10):
            self.assertEqual(10, get_site_id(None))

    def test_get_site_id_from_int(self):
        self.assertEqual(10, get_site_id(10))

    def test_get_site_id_from_site(self):
        # A Site instance resolves to its primary key.
        site = Site()
        site.id = 10
        self.assertEqual(10, get_site_id(site))

    def test_get_site_id_from_str_int(self):
        # Numeric strings are converted to int.
        self.assertEqual(10, get_site_id('10'))

    def test_get_site_id_from_str(self):
        # Non-numeric strings fall back to settings.SITE_ID.
        with self.settings(SITE_ID=10):
            self.assertEqual(10, get_site_id("something"))

    def test_unicode_placeholder_name_fails_fast(self):
        # get_placeholders() must fail fast with ImproperlyConfigured for
        # this template (unicode placeholder name).
        self.assertRaises(ImproperlyConfigured, get_placeholders, 'unicode_placeholder.html')

    def test_page_attribute_tag_escapes_content(self):
        # {% page_attribute %} must escape HTML coming from page content
        # (XSS protection): output equals escape(script), never the raw value.
        script = '<script>alert("XSS");</script>'

        class FakePage(object):
            def get_page_title(self, *args, **kwargs):
                return script

        class FakeRequest(object):
            current_page = FakePage()
            REQUEST = {'language': 'en'}

        request = FakeRequest()
        template = Template('{% load cms_tags %}{% page_attribute page_title %}')
        context = Context({'request': request})
        output = template.render(context)
        self.assertNotEqual(script, output)
        self.assertEqual(escape(script), output)

    def test_json_encoder(self):
        # json_filter must serialise basic Python values to JSON text.
        self.assertEqual(json_filter(True), 'true')
        self.assertEqual(json_filter(False), 'false')
        self.assertEqual(json_filter([1, 2, 3]), '[1, 2, 3]')
        self.assertEqual(json_filter((1, 2, 3)), '[1, 2, 3]')
        filtered_dict = json_filter({'item1': 1, 'item2': 2, 'item3': 3})
        # Dict key order is not guaranteed, so check each entry individually.
        self.assertTrue('"item1": 1' in filtered_dict)
        self.assertTrue('"item2": 2' in filtered_dict)
        self.assertTrue('"item3": 3' in filtered_dict)
        # NOTE(review): now().today() returns a naive datetime (.today() is a
        # classmethod that ignores the instance) — presumably intentional
        # here; the [:-3] slice trims microseconds down to milliseconds.
        today = now().today()
        self.assertEqual('"%s"' % today.isoformat()[:-3], json_filter(today))
class TemplatetagDatabaseTests(TwoPagesFixture, CMSTestCase):
    """Template tag tests that rely on the two-page fixture ('first'/'second')."""

    def _getfirst(self):
        # Published (public) version of the page titled 'first'.
        return Page.objects.public().get(title_set__title='first')

    def _getsecond(self):
        # Published (public) version of the page titled 'second'.
        return Page.objects.public().get(title_set__title='second')

    def test_get_page_by_untyped_arg_none(self):
        # None resolves to the request's current page.
        control = self._getfirst()
        request = self.get_request('/')
        request.current_page = control
        page = _get_page_by_untyped_arg(None, request, 1)
        self.assertEqual(page, control)

    def test_get_page_by_pk_arg_edit_mode(self):
        # In edit mode ("edit" GET param + staff user), a pk resolves to the
        # draft version of the page.
        control = self._getfirst()
        request = self.get_request('/')
        request.GET = {"edit": ''}
        user = self._create_user("admin", True, True)
        request.current_page = control
        request.user = user
        middleware = ToolbarMiddleware()
        middleware.process_request(request)
        page = _get_page_by_untyped_arg(control.pk, request, 1)
        self.assertEqual(page, control.publisher_draft)

    def test_get_page_by_untyped_arg_page(self):
        # A Page instance is returned as-is.
        control = self._getfirst()
        request = self.get_request('/')
        page = _get_page_by_untyped_arg(control, request, 1)
        self.assertEqual(page, control)

    def test_get_page_by_untyped_arg_reverse_id(self):
        # A string is treated as a reverse_id lookup.
        second = self._getsecond()
        request = self.get_request('/')
        page = _get_page_by_untyped_arg("myreverseid", request, 1)
        self.assertEqual(page, second)

    def test_get_page_by_untyped_arg_dict(self):
        # A dict is used as filter kwargs (here: by pk).
        second = self._getsecond()
        request = self.get_request('/')
        page = _get_page_by_untyped_arg({'pk': second.pk}, request, 1)
        self.assertEqual(page, second)

    def test_get_page_by_untyped_arg_dict_fail_debug(self):
        # With DEBUG on, a missing page raises instead of mailing managers.
        with self.settings(DEBUG=True):
            request = self.get_request('/')
            self.assertRaises(Page.DoesNotExist,
                              _get_page_by_untyped_arg, {'pk': 1003}, request, 1
            )
            self.assertEqual(len(mail.outbox), 0)

    def test_get_page_by_untyped_arg_dict_fail_nodebug_do_email(self):
        # With DEBUG off and broken-link emails enabled, a missing page
        # returns None and emails the managers.
        with self.settings(SEND_BROKEN_LINK_EMAILS=True, DEBUG=False,
                           MANAGERS=[("Jenkins", "tests@django-cms.org")]):
            request = self.get_request('/')
            page = _get_page_by_untyped_arg({'pk': 1003}, request, 1)
            self.assertEqual(page, None)
            self.assertEqual(len(mail.outbox), 1)

    def test_get_page_by_untyped_arg_dict_fail_nodebug_no_email(self):
        # Same as above but with emails disabled: no mail is sent.
        with self.settings(SEND_BROKEN_LINK_EMAILS=False, DEBUG=False,
                           MANAGERS=[("Jenkins", "tests@django-cms.org")]):
            request = self.get_request('/')
            page = _get_page_by_untyped_arg({'pk': 1003}, request, 1)
            self.assertEqual(page, None)
            self.assertEqual(len(mail.outbox), 0)

    def test_get_page_by_untyped_arg_fail(self):
        # Unsupported argument types (e.g. a list) raise TypeError.
        request = self.get_request('/')
        self.assertRaises(TypeError, _get_page_by_untyped_arg, [], request, 1)

    def test_show_placeholder_for_page_placeholder_does_not_exist(self):
        """
        Verify ``show_placeholder`` correctly handles being given an
        invalid identifier.
        """
        with self.settings(DEBUG=True):
            context = self.get_context('/')
            self.assertRaises(Placeholder.DoesNotExist, _show_placeholder_for_page,
                              context, 'does_not_exist', 'myreverseid')
        # With DEBUG off the same call degrades to empty content. Note this
        # reuses the context built inside the DEBUG=True block above.
        with self.settings(DEBUG=False):
            content = _show_placeholder_for_page(context, 'does_not_exist', 'myreverseid')
            self.assertEqual(content['content'], '')

    def test_untranslated_language_url(self):
        """ Tests page_language_url templatetag behavior when used on a page
          without the requested translation, both when CMS_HIDE_UNTRANSLATED is
          True and False.
          When True it should return the root page URL if the current page is
          untranslated (PR #1125)
        """
        # page_1 and page_2 have a 'de' translation; page_3 only has 'en'.
        page_1 = create_page('Page 1', 'nav_playground.html', 'en', published=True,
                             in_navigation=True, reverse_id='page1')
        create_title("de", "Seite 1", page_1, slug="seite-1")
        page_1.publish('en')
        page_1.publish('de')
        page_2 = create_page('Page 2', 'nav_playground.html', 'en', page_1, published=True,
                             in_navigation=True, reverse_id='page2')
        create_title("de", "Seite 2", page_2, slug="seite-2")
        page_2.publish('en')
        page_2.publish('de')
        page_3 = create_page('Page 3', 'nav_playground.html', 'en', page_2, published=True,
                             in_navigation=True, reverse_id='page3')
        tpl = Template("{% load menu_tags %}{% page_language_url 'de' %}")
        lang_settings = deepcopy(get_cms_setting('LANGUAGES'))
        lang_settings[1][1]['hide_untranslated'] = False
        with self.settings(CMS_LANGUAGES=lang_settings):
            context = self.get_context(page_2.get_absolute_url())
            context['request'].current_page = page_2
            res = tpl.render(context)
            self.assertEqual(res, "/de/seite-2/")
            # Default configuration has CMS_HIDE_UNTRANSLATED=False
            context = self.get_context(page_2.get_absolute_url())
            context['request'].current_page = page_2.publisher_public
            res = tpl.render(context)
            self.assertEqual(res, "/de/seite-2/")
            # untranslated page: the 'en' slug is kept in the 'de' URL
            context = self.get_context(page_3.get_absolute_url())
            context['request'].current_page = page_3.publisher_public
            res = tpl.render(context)
            self.assertEqual(res, "/de/page-3/")
        lang_settings[1][1]['hide_untranslated'] = True
        with self.settings(CMS_LANGUAGES=lang_settings):
            context = self.get_context(page_2.get_absolute_url())
            context['request'].current_page = page_2.publisher_public
            res = tpl.render(context)
            self.assertEqual(res, "/de/seite-2/")
            # hidden untranslated page: fall back to the language root URL
            context = self.get_context(page_3.get_absolute_url())
            context['request'].current_page = page_3.publisher_public
            res = tpl.render(context)
            self.assertEqual(res, "/de/")

    def test_create_placeholder_if_not_exist_in_template(self):
        """
        Tests that adding a new placeholder to an existing page's template
        creates the placeholder.
        """
        page = create_page('Test', 'col_two.html', 'en')
        # Simulate the page's template having gained a new placeholder by
        # pointing the cached template at one containing an extra slot.
        page._template_cache = 'col_three.html'

        class FakeRequest(object):
            current_page = page
            REQUEST = {'language': 'en'}

        placeholder = _get_placeholder(page, page, dict(request=FakeRequest()), 'col_right')
        page.placeholders.get(slot='col_right')
        self.assertEqual(placeholder.slot, 'col_right')
class NoFixtureDatabaseTemplateTagTests(CMSTestCase):
    """Template tag tests that create their own pages instead of fixtures."""

    def test_cached_show_placeholder_sekizai(self):
        """{% show_placeholder %} keeps working with sekizai across renders."""
        from django.core.cache import cache
        cache.clear()
        from cms.test_utils import project
        template_dir = os.path.join(os.path.dirname(project.__file__), 'templates', 'alt_plugin_templates',
                                    'show_placeholder')
        page = create_page('Test', 'col_two.html', 'en')
        placeholder = page.placeholders.all()[0]
        add_plugin(placeholder, TextPlugin, 'en', body='HIDDEN')
        request = RequestFactory().get('/')
        request.user = self.get_staff_user_with_no_permissions()
        request.current_page = page
        # The template-override setting name changed in Django 1.8.
        if DJANGO_1_7:
            override = {'TEMPLATE_DIRS': [template_dir], 'CMS_TEMPLATES': []}
        else:
            override = {'TEMPLATES': deepcopy(settings.TEMPLATES)}
            override['TEMPLATES'][0]['DIRS'] = [template_dir]
        with self.settings(**override):
            template = Template(
                "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}")
            context = RequestContext(request, {'page': page, 'slot': placeholder.slot})
            output = template.render(context)
            self.assertIn('JAVASCRIPT', output)
            # second render (served from cache) must still include the JS block
            context = RequestContext(request, {'page': page, 'slot': placeholder.slot})
            output = template.render(context)
            self.assertIn('JAVASCRIPT', output)

    def test_show_placeholder_for_page_marks_output_safe(self):
        """Plugin HTML from {% show_placeholder %} must not be escaped."""
        from django.core.cache import cache
        cache.clear()
        page = create_page('Test', 'col_two.html', 'en')
        placeholder = page.placeholders.all()[0]
        add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
        request = RequestFactory().get('/')
        request.user = AnonymousUser()
        request.current_page = page
        template = Template(
            "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}")
        context = RequestContext(request, {'page': page, 'slot': placeholder.slot})
        with self.assertNumQueries(4):
            output = template.render(context)
        self.assertIn('<b>Test</b>', output)
        # cached second render needs no database queries at all
        context = RequestContext(request, {'page': page, 'slot': placeholder.slot})
        with self.assertNumQueries(0):
            output = template.render(context)
        self.assertIn('<b>Test</b>', output)

    def test_cached_show_placeholder_preview(self):
        """?preview requests see the newest (draft) content, not the cache."""
        from django.core.cache import cache
        cache.clear()
        page = create_page('Test', 'col_two.html', 'en', published=True)
        placeholder = page.placeholders.all()[0]
        add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
        request = RequestFactory().get('/')
        user = self._create_user("admin", True, True)
        request.current_page = page.publisher_public
        request.user = user
        template = Template(
            "{% load cms_tags %}{% show_placeholder slot page 'en' 1 %}")
        context = RequestContext(request, {'page': page, 'slot': placeholder.slot})
        with self.assertNumQueries(4):
            output = template.render(context)
        self.assertIn('<b>Test</b>', output)
        add_plugin(placeholder, TextPlugin, 'en', body='<b>Test2</b>')
        request = RequestFactory().get('/?preview')
        request.current_page = page
        request.user = user
        context = RequestContext(request, {'page': page, 'slot': placeholder.slot})
        with self.assertNumQueries(4):
            output = template.render(context)
        self.assertIn('<b>Test2</b>', output)

    def test_render_plugin(self):
        """{% render_plugin %} renders a plugin instance without extra queries."""
        from django.core.cache import cache
        cache.clear()
        page = create_page('Test', 'col_two.html', 'en', published=True)
        placeholder = page.placeholders.all()[0]
        plugin = add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
        template = Template(
            "{% load cms_tags %}{% render_plugin plugin %}")
        request = RequestFactory().get('/')
        user = self._create_user("admin", True, True)
        request.user = user
        request.current_page = page
        request.session = {}
        request.toolbar = CMSToolbar(request)
        context = RequestContext(request, {'plugin': plugin})
        with self.assertNumQueries(0):
            output = template.render(context)
        self.assertIn('<b>Test</b>', output)

    def test_render_plugin_no_context(self):
        """The RenderPlugin tag works when invoked directly with a bare context."""
        placeholder = Placeholder.objects.create(slot='test')
        plugin = add_plugin(placeholder, TextPlugin, 'en', body='Test')
        parser = DummyParser()
        tokens = DummyTokens(plugin)
        tag = RenderPlugin(parser, tokens)
        superuser = self.get_superuser()
        request = RequestFactory().get('/')
        request.current_page = None
        request.user = superuser
        request.session = {}
        request.toolbar = CMSToolbar(request)
        request.toolbar.edit_mode = True
        context = SekizaiContext({
            'request': request
        })
        output = tag.render(context)
        self.assertEqual(
            output,
            '<div class="cms-plugin cms-plugin-{0}">Test</div>'.format(
                plugin.pk
            )
        )

    def test_render_placeholder_with_no_page(self):
        """{% placeholder or %} fallback renders on a URL with no cms page."""
        page = create_page('Test', 'col_two.html', 'en', published=True)
        template = Template(
            "{% load cms_tags %}{% placeholder test or %}< --- empty --->{% endplaceholder %}")
        request = RequestFactory().get('/asdadsaasd/')
        user = self.get_superuser()
        request.user = user
        request.current_page = page
        request.session = {}
        request.toolbar = CMSToolbar(request)
        request.toolbar.edit_mode = True
        request.toolbar.is_staff = True
        context = RequestContext(request)
        with self.assertNumQueries(4):
            template.render(context)

    def test_render_placeholder_as_var(self):
        # NOTE(review): this test is identical to
        # test_render_placeholder_with_no_page and never exercises the
        # 'as var' form — presumably a copy-paste leftover; confirm intent
        # before rewriting the template string.
        page = create_page('Test', 'col_two.html', 'en', published=True)
        template = Template(
            "{% load cms_tags %}{% placeholder test or %}< --- empty --->{% endplaceholder %}")
        request = RequestFactory().get('/asdadsaasd/')
        user = self.get_superuser()
        request.user = user
        request.current_page = page
        request.session = {}
        request.toolbar = CMSToolbar(request)
        request.toolbar.edit_mode = True
        request.toolbar.is_staff = True
        context = RequestContext(request)
        with self.assertNumQueries(4):
            template.render(context)

    def test_render_model_add(self):
        """{% render_model_add %} emits its css markers only in edit mode."""
        from django.core.cache import cache
        from cms.test_utils.project.sampleapp.models import Category
        cache.clear()
        page = create_page('Test', 'col_two.html', 'en', published=True)
        template = Template(
            "{% load cms_tags %}{% render_model_add category %}")
        user = self._create_user("admin", True, True)
        request = RequestFactory().get('/')
        request.user = user
        request.current_page = page
        request.session = {}
        request.toolbar = CMSToolbar(request)
        request.toolbar.edit_mode = True
        request.toolbar.is_staff = True
        context = RequestContext(request, {'category': Category()})
        with self.assertNumQueries(0):
            output = template.render(context)
        # BUG FIX: the two string literals used to be separate statements
        # (the second was a no-op expression), so only the first half was
        # ever asserted; join them into one implicitly-concatenated literal.
        expected = ('cms-plugin cms-plugin-sampleapp-category-add-0 '
                    'cms-render-model-add')
        self.assertIn(expected, output)
        # Now test that it does NOT render when not in edit mode
        request = RequestFactory().get('/')
        request.user = user
        request.current_page = page
        request.session = {}
        request.toolbar = CMSToolbar(request)
        context = RequestContext(request, {'category': Category()})
        with self.assertNumQueries(0):
            output = template.render(context)
        expected = ''
        self.assertEqual(expected, output)

    def test_render_model_add_block(self):
        """{% render_model_add_block %} wraps its body, markers only in edit mode."""
        from django.core.cache import cache
        from cms.test_utils.project.sampleapp.models import Category
        cache.clear()
        page = create_page('Test', 'col_two.html', 'en', published=True)
        template = Template(
            "{% load cms_tags %}{% render_model_add_block category %}wrapped{% endrender_model_add_block %}")
        user = self._create_user("admin", True, True)
        request = RequestFactory().get('/')
        request.user = user
        request.current_page = page
        request.session = {}
        request.toolbar = CMSToolbar(request)
        request.toolbar.edit_mode = True
        request.toolbar.is_staff = True
        context = RequestContext(request, {'category': Category()})
        with self.assertNumQueries(0):
            output = template.render(context)
        # BUG FIX: same stray-literal problem as in test_render_model_add;
        # assert the full intended class string.
        expected = ('cms-plugin cms-plugin-sampleapp-category-add-0 '
                    'cms-render-model-add')
        self.assertIn(expected, output)
        # Now test that it does NOT render when not in edit mode
        request = RequestFactory().get('/')
        request.user = user
        request.current_page = page
        request.session = {}
        request.toolbar = CMSToolbar(request)
        context = RequestContext(request, {'category': Category()})
        with self.assertNumQueries(0):
            output = template.render(context)
        expected = 'wrapped'
        self.assertEqual(expected, output)
| bsd-3-clause |
naousse/odoo | openerp/modules/__init__.py | 352 | 1516 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
from . import db, graph, loading, migration, module, registry
from openerp.modules.loading import load_modules
from openerp.modules.module import get_modules, get_modules_with_version, \
load_information_from_description_file, get_module_resource, get_module_path, \
initialize_sys_path, load_openerp_module, init_module_models, adapt_version
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hydrospanner/DForurm | DForurm/env/Lib/site-packages/django/contrib/admin/views/main.py | 49 | 16694 | import sys
from collections import OrderedDict
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import (
DisallowedModelAdminLookup, DisallowedModelAdminToField,
)
from django.contrib.admin.options import (
IS_POPUP_VAR, TO_FIELD_VAR, IncorrectLookupParameters,
)
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote,
)
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, SuspiciousOperation,
)
from django.core.paginator import InvalidPage
from django.db import models
from django.urls import reverse
from django.utils import six
from django.utils.encoding import force_text
from django.utils.http import urlencode
from django.utils.translation import ugettext
# Changelist settings: the GET parameter names understood by the admin change
# list view. Anything else in the query string is treated as a field lookup.
ALL_VAR = 'all'  # show all results on a single page (no pagination)
ORDER_VAR = 'o'  # dot-separated list_display column indexes to sort by
ORDER_TYPE_VAR = 'ot'  # ordering direction parameter (only filtered out here)
PAGE_VAR = 'p'  # zero-based page number
SEARCH_VAR = 'q'  # free-text search query
ERROR_FLAG = 'e'  # error marker; stripped from the params in __init__
# Parameters handled above that must never be treated as field lookups.
IGNORED_PARAMS = (
    ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
class ChangeList(object):
    def __init__(self, request, model, list_display, list_display_links,
                 list_filter, date_hierarchy, search_fields, list_select_related,
                 list_per_page, list_max_show_all, list_editable, model_admin):
        """
        Capture the ModelAdmin configuration and per-request state (page
        number, popup/to-field flags, search query, filters), then build the
        queryset and the result list for the admin change list view.
        """
        self.model = model
        self.opts = model._meta
        self.lookup_opts = self.opts
        # Base queryset before any admin filtering is applied.
        self.root_queryset = model_admin.get_queryset(request)
        self.list_display = list_display
        self.list_display_links = list_display_links
        self.list_filter = list_filter
        self.date_hierarchy = date_hierarchy
        self.search_fields = search_fields
        self.list_select_related = list_select_related
        self.list_per_page = list_per_page
        self.list_max_show_all = list_max_show_all
        self.model_admin = model_admin
        self.preserved_filters = model_admin.get_preserved_filters(request)
        # Get search parameters from the query string.
        try:
            self.page_num = int(request.GET.get(PAGE_VAR, 0))
        except ValueError:
            # Non-numeric page parameter: fall back to the first page.
            self.page_num = 0
        self.show_all = ALL_VAR in request.GET
        self.is_popup = IS_POPUP_VAR in request.GET
        to_field = request.GET.get(TO_FIELD_VAR)
        # Guard against arbitrary field references in popup lookups.
        if to_field and not model_admin.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
        self.to_field = to_field
        self.params = dict(request.GET.items())
        # The page number and error flag are internal parameters and must not
        # leak into the field lookups derived from self.params later on.
        if PAGE_VAR in self.params:
            del self.params[PAGE_VAR]
        if ERROR_FLAG in self.params:
            del self.params[ERROR_FLAG]
        if self.is_popup:
            # No inline editing inside a popup window.
            self.list_editable = ()
        else:
            self.list_editable = list_editable
        self.query = request.GET.get(SEARCH_VAR, '')
        self.queryset = self.get_queryset(request)
        self.get_results(request)
        if self.is_popup:
            title = ugettext('Select %s')
        else:
            title = ugettext('Select %s to change')
        self.title = title % force_text(self.opts.verbose_name)
        self.pk_attname = self.lookup_opts.pk.attname
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
    def get_filters(self, request):
        """
        Resolve ``list_filter`` into filter spec instances and validate the
        remaining query-string lookups.

        Returns a 4-tuple ``(filter_specs, has_filters, lookup_params,
        use_distinct)``: the instantiated filters, whether any produced
        output, the query-string parameters not consumed by a filter, and
        whether ``.distinct()`` is needed to avoid duplicate rows from
        multi-valued relations.
        """
        lookup_params = self.get_filters_params()
        use_distinct = False
        # Reject any lookup the ModelAdmin does not explicitly allow.
        for key, value in lookup_params.items():
            if not self.model_admin.lookup_allowed(key, value):
                raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)
        filter_specs = []
        if self.list_filter:
            for list_filter in self.list_filter:
                if callable(list_filter):
                    # This is simply a custom list filter class.
                    spec = list_filter(request, lookup_params, self.model, self.model_admin)
                else:
                    field_path = None
                    if isinstance(list_filter, (tuple, list)):
                        # This is a custom FieldListFilter class for a given field.
                        field, field_list_filter_class = list_filter
                    else:
                        # This is simply a field name, so use the default
                        # FieldListFilter class that has been registered for
                        # the type of the given field.
                        field, field_list_filter_class = list_filter, FieldListFilter.create
                    if not isinstance(field, models.Field):
                        # A field name/path was given: resolve it to the
                        # actual model field it ends at.
                        field_path = field
                        field = get_fields_from_path(self.model, field_path)[-1]
                    lookup_params_count = len(lookup_params)
                    spec = field_list_filter_class(
                        field, request, lookup_params,
                        self.model, self.model_admin, field_path=field_path
                    )
                    # field_list_filter_class removes any lookup_params it
                    # processes. If that happened, check if distinct() is
                    # needed to remove duplicate results.
                    if lookup_params_count > len(lookup_params):
                        use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, field_path)
                if spec and spec.has_output():
                    filter_specs.append(spec)
        # At this point, all the parameters used by the various ListFilters
        # have been removed from lookup_params, which now only contains other
        # parameters passed via the query string. We now loop through the
        # remaining parameters both to ensure that all the parameters are valid
        # fields and to determine if at least one of them needs distinct(). If
        # the lookup parameters aren't real fields, then bail out.
        try:
            for key, value in lookup_params.items():
                lookup_params[key] = prepare_lookup_value(key, value)
                use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, key)
            return filter_specs, bool(filter_specs), lookup_params, use_distinct
        except FieldDoesNotExist as e:
            # Re-raise as IncorrectLookupParameters while preserving the
            # original traceback (Python 2/3 compatible via six.reraise).
            six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
    def get_results(self, request):
        """
        Run the paginator over ``self.queryset`` and store the result page
        plus the counters used by the change list template.
        """
        paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
        # Get the number of objects, with admin filters applied.
        result_count = paginator.count
        # Get the total number of objects, with no admin filters applied.
        if self.model_admin.show_full_result_count:
            full_result_count = self.root_queryset.count()
        else:
            # Counting the unfiltered queryset is skipped when disabled.
            full_result_count = None
        can_show_all = result_count <= self.list_max_show_all
        multi_page = result_count > self.list_per_page
        # Get the list of objects to display on this page.
        if (self.show_all and can_show_all) or not multi_page:
            result_list = self.queryset._clone()
        else:
            try:
                # page_num is zero-based while the paginator is one-based.
                result_list = paginator.page(self.page_num + 1).object_list
            except InvalidPage:
                raise IncorrectLookupParameters
        self.result_count = result_count
        self.show_full_result_count = self.model_admin.show_full_result_count
        # Admin actions are shown if there is at least one entry
        # or if entries are not counted because show_full_result_count is disabled
        self.show_admin_actions = not self.show_full_result_count or bool(full_result_count)
        self.full_result_count = full_result_count
        self.result_list = result_list
        self.can_show_all = can_show_all
        self.multi_page = multi_page
        self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
    def get_ordering(self, request, queryset):
        """
        Returns the list of ordering fields for the change list.
        First we check the get_ordering() method in model admin, then we check
        the object's default ordering. Then, any manually-specified ordering
        from the query string overrides anything. Finally, a deterministic
        order is guaranteed by ensuring the primary key is used as the last
        ordering field.
        """
        params = self.params
        ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering())
        if ORDER_VAR in params:
            # Clear ordering and used params
            ordering = []
            # ORDER_VAR is a dot-separated list of (optionally '-'-prefixed)
            # list_display column indexes, e.g. "1.-2".
            order_params = params[ORDER_VAR].split('.')
            for p in order_params:
                try:
                    none, pfx, idx = p.rpartition('-')
                    field_name = self.list_display[int(idx)]
                    order_field = self.get_ordering_field(field_name)
                    if not order_field:
                        continue  # No 'admin_order_field', skip it
                    # reverse order if order_field has already "-" as prefix
                    if order_field.startswith('-') and pfx == "-":
                        ordering.append(order_field[1:])
                    else:
                        ordering.append(pfx + order_field)
                except (IndexError, ValueError):
                    continue  # Invalid ordering specified, skip it.
        # Add the given query's ordering fields, if any.
        ordering.extend(queryset.query.order_by)
        # Ensure that the primary key is systematically present in the list of
        # ordering fields so we can guarantee a deterministic order across all
        # database backends.
        pk_name = self.lookup_opts.pk.name
        if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
            # The two sets do not intersect, meaning the pk isn't present. So
            # we add it.
            ordering.append('-pk')
        return ordering
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
    def get_queryset(self, request):
        """Build the change list's queryset: apply list filters, remaining
        query-string lookups, select_related, ordering and search, returning
        a distinct() queryset when filters or search require it."""
        # First, we collect all the declared list filters.
        (self.filter_specs, self.has_filters, remaining_lookup_params,
         filters_use_distinct) = self.get_filters(request)
        # Then, we let every list filter modify the queryset to its liking.
        qs = self.root_queryset
        for filter_spec in self.filter_specs:
            new_qs = filter_spec.queryset(request, qs)
            if new_qs is not None:
                qs = new_qs
        try:
            # Finally, we apply the remaining lookup parameters from the query
            # string (i.e. those that haven't already been processed by the
            # filters).
            qs = qs.filter(**remaining_lookup_params)
        except (SuspiciousOperation, ImproperlyConfigured):
            # Allow certain types of errors to be re-raised as-is so that the
            # caller can treat them in a special way.
            raise
        except Exception as e:
            # Every other error is caught with a naked except, because we don't
            # have any other way of validating lookup parameters. They might be
            # invalid if the keyword arguments are incorrect, or if the values
            # are not in the correct type, so we might get FieldError,
            # ValueError, ValidationError, or ?.
            raise IncorrectLookupParameters(e)
        if not qs.query.select_related:
            qs = self.apply_select_related(qs)
        # Set ordering.
        ordering = self.get_ordering(request, qs)
        qs = qs.order_by(*ordering)
        # Apply search results
        qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query)
        # Remove duplicates from results, if necessary
        if filters_use_distinct | search_use_distinct:
            return qs.distinct()
        else:
            return qs
def apply_select_related(self, qs):
if self.list_select_related is True:
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.list_select_related:
return qs.select_related(*self.list_select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field.remote_field, models.ManyToOneRel):
# <FK>_id field names don't require a join.
if field_name == field.get_attname():
continue
return True
return False
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.model_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
| mit |
leonardo-modules/leonardo-ckeditor | leonardo_ckeditor/ckeditor_config.py | 1 | 1037 |
from ckeditor.widgets import DEFAULT_CONFIG
# Tune django-ckeditor's shared DEFAULT_CONFIG in place: allow any HTML
# content, fix the editor height, and define the "Full" toolbar layout.
DEFAULT_CONFIG.update({
    'allowedContent': True,
    'height': 350,
    'toolbar_Full': [
        ["Cut", "Copy", "Paste", "PasteText",
         "PasteFromWord", "-", "Undo", "Redo"],
        ["Find", "Replace", "-", "SelectAll", "-", "SpellChecker", "Scayt"],
        ["Form", "Checkbox", "Radio", "TextField", "Textarea",
         "Select", "Button", "ImageButton", "HiddenField"],
        ["Bold", "Italic", "Underline", "Strike",
         "Subscript", "Superscript", "-", "RemoveFormat"],
        ["NumberedList", "BulletedList", "-", "Outdent",
         "Indent", "-", "Blockquote", "CreateDiv",
         "-", "JustifyLeft", "JustifyCenter", "JustifyRight",
         "JustifyBlock", "-", "BidiLtr", "BidiRtl"],
        ["Link", "Unlink", "Anchor"],
        ["Image", "Flash", "Table", "HorizontalRule",
         "Smiley", "SpecialChar", "PageBreak", "Iframe"],
        ["Styles", "Format", "Font", "FontSize"],
        ["TextColor", "BGColor"],
        ["Maximize", "ShowBlocks"],
        ["Source"],
    ],
})
| bsd-3-clause |
DylannCordel/django-cms | cms/migrations/0010_migrate_use_structure.py | 15 | 2287 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from django.db import models, migrations
def forwards(apps, schema_editor):
    """Create the ``cms.use_structure`` permission and grant it to every
    non-superuser staff member and every group that can change pages."""
    ph_model = apps.get_model('cms', 'Placeholder')
    page_model = apps.get_model('cms', 'Page')
    try:
        ph_ctype = ContentType.objects.get_for_model(ph_model)
        page_ctype = ContentType.objects.get_for_model(page_model)
        permission, _ = Permission.objects.get_or_create(
            codename='use_structure', content_type=ph_ctype, name=u"Can use Structure mode")
        # BUG FIX: get_or_create() returns an (object, created) tuple. The
        # tuple was previously compared against Permission instances in the
        # group loop below, so no group ever received the new permission.
        page_permission, _ = Permission.objects.get_or_create(
            codename='change_page', content_type=page_ctype)
        for user in get_user_model().objects.filter(is_superuser=False, is_staff=True):
            if user.has_perm("cms.change_page"):
                user.user_permissions.add(permission)
        for group in Group.objects.all():
            if page_permission in group.permissions.all():
                group.permissions.add(permission)
    except ContentType.DoesNotExist:
        print(u'Users not migrated to use_structure permission, please add the permission manually')
def backwards(apps, schema_editor):
    """Revoke the ``cms.use_structure`` permission from users and groups."""
    placeholder_model = apps.get_model('cms', 'Placeholder')
    placeholder_ctype = ContentType.objects.get(
        app_label=placeholder_model._meta.app_label,
        model=placeholder_model._meta.model_name)
    structure_perm, _ = Permission.objects.get_or_create(
        codename='use_structure', content_type=placeholder_ctype,
        name=u"Can use Structure mode")
    staff_users = get_user_model().objects.filter(is_superuser=False, is_staff=True)
    for user in staff_users:
        if user.has_perm("cms.use_structure"):
            user.user_permissions.remove(structure_perm)
    for group in Group.objects.all():
        if structure_perm in group.permissions.all():
            group.permissions.remove(structure_perm)
class Migration(migrations.Migration):
    """Add the ``use_structure`` permission to Placeholder and grant it to
    existing page editors via the data migration above."""

    dependencies = [('cms', '0009_merge')]

    operations = [
        # Declare the new custom permission on the Placeholder model.
        migrations.AlterModelOptions(
            name='placeholder',
            options={'permissions': (('use_structure', 'Can use Structure mode'),)},
        ),
        # Grant/revoke the permission for existing users and groups.
        migrations.RunPython(forwards, backwards),
    ]
| bsd-3-clause |
jamesbdunlop/defaultMayaLibrary | yaml_import/skinWeights_Import.py | 1 | 5283 | from maya.OpenMayaAnim import MFnSkinCluster
from maya.OpenMaya import MIntArray, MDagPathArray
import os, getpass
import cPickle as pickle
import maya.mel as mel
import maya.cmds as cmds
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
import cPickle as pickle
import getpass, os, time
def getIndexes():
    """Return the integer vertex ids of the currently selected components.

    Handles both single components ('mesh.vtx[12]') and ranges
    ('mesh.vtx[3:7]'), expanding ranges inclusively.
    """
    vertexIds = []
    for component in cmds.ls(sl=True):
        token = component.split('.vtx[')[-1].replace(']', '')
        if ':' in component:
            # Range selection; split-based parse also tolerates the case
            # where the colon comes from a namespace and token is a single id.
            firstId = token.split(":")[0]
            lastId = token.split(":")[-1]
            vertexIds.extend(range(int(firstId), int(lastId) + 1))
        else:
            vertexIds.append(int(token))
    return vertexIds
def getData(filePath):
    """Deserialize and return the pickled weight data stored at *filePath*.

    Returns an empty list when the file holds a falsy payload.
    """
    with open(filePath, "rb") as sourceFile:
        payload = pickle.load(sourceFile)
    return payload or []
def setSkinWeights( meshName, skinClusterName, filePath, sel = False):
    """Apply pickled skin weights from *filePath* to *skinClusterName* on
    *meshName*.

    The data is a list of (vertexComponentName, [(jointName, weight), ...])
    pairs as written by the matching export tool. When *sel* is True only
    the currently selected vertices are written (and existing weights on
    them are NOT cleared first); otherwise every vertex in the file is
    cleared and rewritten. Wrapped in a single undo chunk.
    """
    ## Start the clock!
    start = time.time()
    cmds.undoInfo(openChunk=True)
    vertJointWeightData = getData(filePath)
    ## Turn off maintain max influences..
    cmds.setAttr('%s.maintainMaxInfluences' % skinClusterName, 0)
    ## Set weights normalize to none
    cmds.setAttr('%s.normalizeWeights' % skinClusterName, 0)
    ## Set weight distribution to Distance cause neighbours is buggy
    cmds.setAttr('%s.weightDistribution' % skinClusterName, 0)
    #convert the vertex component names into vertex indices
    idxJointWeight = []
    for vert, jointsAndWeights in vertJointWeightData:
        # Take the number between the last '[' and the trailing ']'.
        idx = int( vert[ vert.rindex( '[' )+1:-1 ] )
        idxJointWeight.append( (idx, jointsAndWeights) )
    # Resolve the mesh shape node and walk the dependency graph downstream
    # to find the skin cluster actually deforming it.
    mesh = cmds.listRelatives(meshName, shapes= True)[0]
    mSel = om.MSelectionList()
    mSel.add(mesh)
    meshMObject = om.MObject()
    meshDagPath = om.MDagPath()
    mSel.getDependNode(0, meshMObject)
    mSel.getDagPath(0, meshDagPath)
    skinFn = None
    iterDg = om.MItDependencyGraph(meshMObject, om.MItDependencyGraph.kDownstream, om.MItDependencyGraph.kPlugLevel)
    while not iterDg.isDone():
        currentItem = iterDg.currentItem()
        if currentItem.hasFn(om.MFn.kSkinClusterFilter):
            skinFn = oma.MFnSkinCluster(currentItem)
            break
        iterDg.next()
    #construct a dict mapping joint names to joint indices
    jApiIndices = {}
    # Influences & Influence count
    influences= om.MDagPathArray()
    infCount = skinFn.influenceObjects(influences)
    #influenceNames = [influences[i].partialPathName() for i in range(infCount)]
    for i in range(infCount):
        jApiIndices[ str(influences[i].partialPathName()) ] = skinFn.indexForInfluenceObject( influences[i] )
    ###########
    # Plugs used to discover which physical weight indices exist per vertex.
    weightListP = skinFn.findPlug( "weightList" )
    weightListObj = weightListP.attribute()
    weightsP = skinFn.findPlug( "weights" )
    tmpIntArray = MIntArray()
    baseFmtStr = str( skinClusterName ) + '.weightList[%d]' #pre build this string: fewer string ops == faster-ness!
    if sel:
        verts = getIndexes()
    for vertIdx, jointsAndWeights in idxJointWeight:
        #we need to use the api to query the physical indices used
        weightsP.selectAncestorLogicalIndex( vertIdx, weightListObj )
        weightsP.getExistingArrayAttributeIndices( tmpIntArray )
        if sel:
            # Selection mode: only write weights for selected vertices.
            if vertIdx in verts:
                weightFmtStr = baseFmtStr % vertIdx + '.weights[%d]'
                for jointName, weight in jointsAndWeights:
                    infIdx = jApiIndices[ jointName ]
                    cmds.setAttr(weightFmtStr % infIdx, weight)
        else:
            weightFmtStr = baseFmtStr % vertIdx + '.weights[%d]'
            #clear out any existing skin data - and awesomely we cannot do this with the api - so we need to use a weird ass mel command
            for n in range( tmpIntArray.length() ):
                command = '%s' % (weightFmtStr % tmpIntArray[n])
                mel.eval("removeMultiInstance %s;" % command)
            #at this point using the api or mel to set the data is a moot point... we have the strings already so just use mel
            for jointName, weight in jointsAndWeights:
                if weight != 0.0:
                    try:
                        infIdx = jApiIndices[ jointName ]
                        cmds.setAttr(weightFmtStr % infIdx, weight)
                    except KeyError:
                        # Influence missing from the cluster; warn and carry on.
                        cmds.warning('Key error for %s skipping' % jointName)
                        pass
    ## Final cleanup
    ## Prune!
    cmds.skinPercent(skinClusterName, pruneWeights = .0015)
    ## Set weights normalize to interactive
    cmds.setAttr('%s.normalizeWeights' % skinClusterName, 1)
    ## Set weight distribution to neighbours
    cmds.setAttr('%s.weightDistribution' % skinClusterName, 1)
    ## Normalize the weights
    cmds.skinPercent(skinClusterName, normalize = True)
    ## Turn on maintain max again
    cmds.setAttr('%s.maintainMaxInfluences' % skinClusterName, 1)
    print 'Skin Import Complete..'
    print 'Time to import skin weights for %s: %s' % (meshName, time.time() - start)
    cmds.undoInfo(closeChunk=True)
| apache-2.0 |
ibarbech/learnbot | learnbot_components/emotionalMotor/src/specificworker.py | 2 | 8944 | #
# Copyright (C) 2017 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, traceback, time, copy, json
from PySide import QtGui, QtCore
from learnbot_components.emotionalMotor.src.genericworker import *
from PIL import Image, ImageDraw
# If RoboComp was compiled with Python bindings you can use InnerModel in Python
# sys.path.append('/opt/robocomp/lib')
# import librobocomp_qmat
# import librobocomp_osgviewer
# import librobocomp_innermodel
# Path of the component's etc/config file (not referenced in this module).
configPath = os.path.join(os.path.dirname(os.path.dirname(__file__)),'etc','config')
# Fallback "neutral" expression: control points (pixel coordinates on the
# 480x320 canvas) for eyebrows (cejaI/D), eyes (ojoI/D) and mouth (boca).
DEFAULTCONFIGNEUTRAL = {'cejaD': {'P2': {'y': 73, 'x': 314}, 'P3': {'y': 99, 'x': 355}, 'P1': {'y': 99, 'x': 278}, 'P4': {'y': 94, 'x': 313}}, 'ojoI': {'P2': {'y': 186, 'x': 196}, 'P1': {'y': 117, 'x': 127}}, 'cejaI': {'P2': {'y': 73, 'x': 160}, 'P3': {'y': 99, 'x': 201}, 'P1': {'y': 99, 'x': 122}, 'P4': {'y': 94, 'x': 160}}, 'ojoD': {'P2': {'y': 186, 'x': 351}, 'P1': {'y': 117, 'x': 282}}, 'boca': {'P2': {'y': 231, 'x': 239}, 'P3': {'y': 234, 'x': 309}, 'P1': {'y': 234, 'x': 170}, 'P6': {'y': 242, 'x': 170}, 'P4': {'y': 242, 'x': 309}, 'P5': {'y': 241, 'x': 239}}}
# Interpolation step added to the animation parameter t each frame (1/15).
OFFSET = 0.06666666666666667
def bezier(p1, p2, t):
    """Linearly interpolate between 2D points *p1* and *p2* at parameter *t*."""
    diff = (p2[0] - p1[0], p2[1] - p1[1])
    return [p1[0] + diff[0] * t, p1[1] + diff[1] * t]


def getPointsBezier(points):
    """Sample the Bezier curve defined by control *points* at 51 evenly
    spaced parameter values (De Casteljau's algorithm); returns tuples."""
    bezierPoints = list()
    pointsCopy = copy.copy(points)
    for t in [x/50. for x in range(51)]:
        # Repeatedly interpolate adjacent points until one point remains.
        while len(points) != 1:
            newPoints = list()
            p1 = points[0]
            for p2 in points[1:]:
                newPoints.append(bezier(p1, p2, t))
                p1 = p2
            points = newPoints
        bezierPoints.append(tuple(points[0]))
        points = pointsCopy
    return bezierPoints


def getBecierConfig(old_config, config_target, t):
    """Return a face configuration interpolated between *old_config* and
    *config_target* at parameter t in [0, 1].

    BUG FIX: the result used to be built with copy.copy(), a shallow copy
    whose nested point dicts are shared with *old_config*; writing the
    interpolated values therefore overwrote the animation's start
    configuration every frame, so the interpolation origin drifted.
    deepcopy keeps *old_config* pristine, giving a true linear blend.
    """
    config = copy.deepcopy(old_config)
    for parte in old_config:
        for point in old_config[parte]:
            if "Radio" in point:
                # Scalar radius entries hold a single 'Value'.
                radio = bezier((old_config[parte][point]["Value"], 0),
                               (config_target[parte][point]["Value"], 0), t)
                config[parte][point]["Value"] = radio[0]
            else:
                p = bezier((old_config[parte][point]["x"], old_config[parte][point]["y"]),
                           (config_target[parte][point]["x"], config_target[parte][point]["y"]), t)
                config[parte][point]["x"] = p[0]
                config[parte][point]["y"] = p[1]
    return config
class Face():
    """Draws the robot's face on a 480x320 RGB image and animates the
    transition between two expression configurations by interpolating
    their control points frame by frame."""
    def __init__(self):
        # White canvas sized for the target framebuffer.
        self.img = Image.new('RGB', (480, 320), (255, 255, 255))
        self.draw = ImageDraw.Draw(self.img)
        # Current / previous expression; config_target drives the animation.
        self.config = DEFAULTCONFIGNEUTRAL
        self.old_config = DEFAULTCONFIGNEUTRAL
        # Interpolation parameter, advanced by OFFSET each rendered frame.
        self.t = 0.06666666666666667
        self.config_target = None
    def render(self):
        """Advance one animation frame and write the image to the Linux
        framebuffer (/dev/fb0); returns its path, or None when idle.

        NOTE(review): this draws 'parpado', 'pupila' and 'mejilla' parts
        that are absent from DEFAULTCONFIGNEUTRAL -- rendering appears to
        rely on a complete config loaded from JSON via setConfig; confirm.
        """
        if self.t <= 1 and self.config_target is not None:
            self.config = getBecierConfig(self.old_config, self.config_target, self.t)
            # Clear the canvas, then draw each facial part.
            self.draw.rectangle(((0, 0), (479, 319)), fill=(255, 255, 255), outline=(255, 255, 255))
            self.renderOjo(self.config["ojoI"])
            self.renderOjo(self.config["ojoD"])
            self.renderParpado(self.config["parpadoI"])
            self.renderParpado(self.config["parpadoD"])
            self.renderCeja(self.config["cejaI"])
            self.renderCeja(self.config["cejaD"])
            self.renderBoca(self.config["boca"])
            self.renderPupila(self.config["pupilaI"])
            self.renderPupila(self.config["pupilaD"])
            self.renderMejilla(self.config["mejillaI"])
            self.renderMejilla(self.config["mejillaD"])
            self.t += OFFSET
            path = "/dev/fb0"
            # Raw RGB bytes straight into the framebuffer device.
            with open(path, "wb") as f:
                f.write(self.img.tobytes())
            # np.array(self.img)
            # cv2.imwrite("/tmp/ebofaceimg.png",np.array(self.img))
            return path
        else:
            # Animation finished: the target becomes the new resting config.
            self.old_config = self.config_target
            self.config_target = None
            return None
    def renderPupila(self, points):
        # Pupil: circle given by Center + Radio, drawn in white (erases).
        P1 = (points["Center"]["x"] - points["Radio"]["Value"], points["Center"]["y"] - points["Radio"]["Value"])
        P2 = (points["Center"]["x"] + points["Radio"]["Value"], points["Center"]["y"] + points["Radio"]["Value"])
        self.draw.ellipse((P1, P2), fill=(255,255,255), outline=(255,255,255))
        # self.draw.ellipse((P1, P2), fill=1)
    def renderParpado(self, points):
        # Eyelid: white region bounded by two Bezier arcs (P1-P2-P3, P3-P4-P1).
        P1 = (points["P1"]["x"], points["P1"]["y"])
        P2 = (points["P2"]["x"], points["P2"]["y"])
        P3 = (points["P3"]["x"], points["P3"]["y"])
        P4 = (points["P4"]["x"], points["P4"]["y"])
        self.draw.polygon(getPointsBezier([P1,P2,P3]) + getPointsBezier([P3,P4,P1]), fill=(255,255,255))
    def renderMejilla(self, points):
        # Cheek: same two-arc construction as the eyelid, filled white.
        P1 = (points["P1"]["x"], points["P1"]["y"])
        P2 = (points["P2"]["x"], points["P2"]["y"])
        P3 = (points["P3"]["x"], points["P3"]["y"])
        P4 = (points["P4"]["x"], points["P4"]["y"])
        self.draw.polygon(getPointsBezier([P1,P2,P3]) + getPointsBezier([P3,P4,P1]), fill=(255,255,255))
    def renderCeja(self, points):
        # Eyebrow: filled dark (palette value 1) two-arc region.
        P1 = (points["P1"]["x"], points["P1"]["y"])
        P2 = (points["P2"]["x"], points["P2"]["y"])
        P3 = (points["P3"]["x"], points["P3"]["y"])
        P4 = (points["P4"]["x"], points["P4"]["y"])
        self.draw.polygon(getPointsBezier([P1, P2, P3]) + getPointsBezier([P3, P4, P1]), fill=1)
    def renderOjo(self, points):
        # Eye: ellipse centred at Center with half-axes Radio1/Radio2.
        P1 = (points["Center"]["x"] - points["Radio1"]["Value"], points["Center"]["y"] - points["Radio2"]["Value"])
        P2 = (points["Center"]["x"] + points["Radio1"]["Value"], points["Center"]["y"] + points["Radio2"]["Value"])
        # P1 = (points["P1"]["x"], points["P1"]["y"])
        # P2 = (points["P2"]["x"], points["P2"]["y"])
        self.draw.ellipse((P1, P2), fill=1)
    def renderBoca(self, points):
        # Mouth: upper lip arc (P1-P2-P3) plus lower lip arc (P4-P5-P6).
        P1 = (points["P1"]["x"], points["P1"]["y"])
        P2 = (points["P2"]["x"], points["P2"]["y"])
        P3 = (points["P3"]["x"], points["P3"]["y"])
        P4 = (points["P4"]["x"], points["P4"]["y"])
        P5 = (points["P5"]["x"], points["P5"]["y"])
        P6 = (points["P6"]["x"], points["P6"]["y"])
        self.draw.polygon(getPointsBezier([P1, P2, P3]) + getPointsBezier([P4, P5, P6]), fill=1, outline=10)
    def setConfig(self, config):
        """Start animating from the current configuration towards *config*."""
        self.config_target = config
        self.old_config = self.config
        self.t = 0.06666666666666667
class SpecificWorker(GenericWorker):
    """RoboComp worker that renders the robot's facial expressions.

    A Face instance is re-rendered on a timer by compute(); the express*
    methods select the target expression, loaded at startup from the JSON
    files shipped in the component's JSON/ directory.
    """

    def __init__(self, proxy_map):
        super(SpecificWorker, self).__init__(proxy_map)
        self.timer.timeout.connect(self.compute)
        self.Period = 66  # ms between renders (~15 fps, matches OFFSET)
        self.timer.start(self.Period)
        self.face = Face()
        # Map emotion name (JSON file name without extension) -> face config.
        self.configEmotions = {}
        jsonDir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "JSON")
        for path in os.listdir(jsonDir):
            with open(os.path.join(jsonDir, path), "r") as f:
                # BUG FIX: the dict key must come from the file *name*;
                # os.path.splitext(f) on the file object raises TypeError,
                # and the express* lookups below need keys like "Fear".
                self.configEmotions[os.path.splitext(path)[0]] = json.load(f)

    def setParams(self, params):
        """No configurable parameters for this component."""
        return True

    @QtCore.Slot()
    def compute(self):
        """Periodic tick: advance the face animation one frame."""
        path = self.face.render()
        return True

    def sendImage(self, file):
        """Load *file* as an image and push it to the display proxy."""
        img = QtGui.QImage(file)
        # NOTE(review): self.i is not initialised in this class; presumably
        # set by GenericWorker -- confirm. 'Image' here must be the RoboComp
        # slice type from genericworker, not PIL.Image -- confirm.
        self.i += 1
        im = Image()
        im.Img = img.bits()
        im.width = img.width()
        im.height = img.height()
        try:
            self.display_proxy.setImage(im)
        except Exception as e:
            traceback.print_exc()

    def expressFear(self):
        """Animate towards the 'Fear' expression."""
        self.face.setConfig(self.configEmotions["Fear"])

    def expressSurprise(self):
        """Animate towards the 'Surprise' expression."""
        self.face.setConfig(self.configEmotions["Surprise"])

    def expressAnger(self):
        """Animate towards the 'Anger' expression."""
        self.face.setConfig(self.configEmotions["Anger"])

    def expressSadness(self):
        """Animate towards the 'Sadness' expression."""
        self.face.setConfig(self.configEmotions["Sadness"])

    def expressDisgust(self):
        """Animate towards the 'Disgust' expression."""
        self.face.setConfig(self.configEmotions["Disgust"])

    def expressJoy(self):
        """Animate towards the 'Joy' expression."""
        self.face.setConfig(self.configEmotions["Joy"])

    def expressNeutral(self):
        """Animate towards the 'Neutral' expression."""
        self.face.setConfig(self.configEmotions["Neutral"])
| gpl-3.0 |
quoideneuf/selenium | py/test/selenium/webdriver/common/stale_reference_tests.py | 65 | 2352 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import StaleElementReferenceException
class StaleReferenceTests(unittest.TestCase):
    """Checks that operations on elements from a previous page raise
    StaleElementReferenceException instead of crashing the driver."""

    def _assert_raises_stale(self, action):
        # Run *action*; fail the test unless it raises the stale exception.
        try:
            action()
            self.fail("Should Throw a StaleElementReferenceException but didnt")
        except StaleElementReferenceException:
            pass

    def testOldPage(self):
        self._loadSimplePage()
        elem = self.driver.find_element(by=By.ID, value="links")
        self._loadPage("xhtmlTest")
        self._assert_raises_stale(elem.click)

    def testShouldNotCrashWhenCallingGetSizeOnAnObsoleteElement(self):
        self._loadSimplePage()
        elem = self.driver.find_element(by=By.ID, value="links")
        self._loadPage("xhtmlTest")
        self._assert_raises_stale(lambda: elem.size)

    def testShouldNotCrashWhenQueryingTheAttributeOfAStaleElement(self):
        self._loadPage("xhtmlTest")
        heading = self.driver.find_element(by=By.XPATH, value="//h1")
        self._loadSimplePage()
        self._assert_raises_stale(lambda: heading.get_attribute("class"))

    def _pageURL(self, name):
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| apache-2.0 |
sam81/pychoacoustics | pychoacoustics/stats_utils.py | 1 | 4020 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2020 Samuele Carcagno <sam.carcagno@gmail.com>
# This file is part of pychoacoustics
# pychoacoustics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pychoacoustics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pychoacoustics. If not, see <http://www.gnu.org/licenses/>.
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
import numpy
from numpy import array, sqrt, log10, mean, sign, sqrt, unique
from scipy.stats.distributions import norm
from scipy.stats.mstats import gmean
def geoMean(vals):
    """Geometric mean of *vals*; all values must share the same sign,
    and the magnitude of the mean is returned."""
    values = array(vals)
    if len(unique(sign(values))) != 1:
        raise ArithmeticError("Sequence of numbers for geometric mean must be all positive or all negative")
    return gmean(numpy.abs(values))

def geoSd(vals):
    """Geometric standard deviation (10**sd of the log10 magnitudes,
    sample variant); values must share the same sign."""
    if len(unique(sign(vals))) != 1:
        raise ArithmeticError("Sequence of numbers for geometric standard deviation must be all positive or all negative")
    magnitudes = numpy.abs(vals)
    return 10**numpy.std(numpy.log10(magnitudes), ddof=1)

def geoSe(vals):
    """Geometric standard error of the mean; values must share the same sign."""
    if len(unique(sign(vals))) != 1:
        raise ArithmeticError("Sequence of numbers for geometric standard error must be all positive or all negative")
    magnitudes = numpy.abs(vals)
    n = len(magnitudes)
    logDeviations = log10(magnitudes) - mean(log10(magnitudes))
    return 10**sqrt(sum(logDeviations**2) / ((n-1) * n))
def se(vals):
    """Standard error of the mean: sample standard deviation / sqrt(n)."""
    sampleSd = numpy.std(vals, ddof=1)
    return sampleSd / sqrt(len(vals))
def getdprime(A_correct, A_total, B_correct, B_total, corrected):
    """Compute d' from correct counts in the A and B intervals.

    When *corrected* is True, perfect (N/N) and null (0/N) proportions are
    clamped by the standard 1/(2N) correction so norm.ppf stays finite.
    """
    def proportion(correct, total):
        if corrected == True:
            if correct == total:
                return 1 - 1/(2*total)
            if correct == 0:
                return 1/(2*total)
        return correct/total
    tA = proportion(A_correct, A_total)
    tB = proportion(B_correct, B_total)
    return norm.ppf(tA) - norm.ppf(1-(tB))
def gammaShRaFromMeanSD(mean, sd):
    """Return (shape, rate) of a gamma distribution with the given mean and sd."""
    if mean <= 0:
        raise ValueError("mean must be > 0")
    if sd <= 0:
        raise ValueError("sd must be > 0")
    variance = sd**2
    return (mean**2)/variance, mean/variance

def gammaShRaFromModeSD(mode, sd):
    """Return (shape, rate) of a gamma distribution with the given mode and sd."""
    if mode <= 0:
        raise ValueError("mode must be > 0")
    if sd <= 0:
        raise ValueError("sd must be > 0")
    variance = sd**2
    rate = (mode + sqrt(mode**2 + 4 * variance)) / (2 * variance)
    return 1 + mode * rate, rate
def betaABFromMeanSTD(mean, std):
    """Return (a, b) of a beta distribution with the given mean and std."""
    if mean <= 0 or mean >= 1:
        raise ValueError("must have 0 < mean < 1")
    if std <= 0:
        raise ValueError("sd must be > 0")
    # kappa = a + b (the concentration); must be positive for a valid beta.
    kappa = mean*(1-mean)/std**2 - 1
    if kappa <= 0:
        raise ValueError("invalid combination of mean and sd")
    return mean * kappa, (1.0 - mean) * kappa

def betaMeanSTDFromAB(a,b):
    """Return (mean, std) of a beta(a, b) distribution."""
    total = a + b
    mu = a/total
    std = sqrt(a*b/(total**2*(total+1)))
    return mu, std

def generalizedBetaABFromMeanSTD(mu, std, xmin, xmax):
    """Return (a, b) of a beta distribution rescaled to [xmin, xmax] with
    the given mean and std."""
    lmbd = (((mu-xmin)*(xmax-mu))/std**2)-1
    span = xmax - xmin
    return lmbd*((mu-xmin)/span), lmbd*((xmax-mu)/span)

def generalizedBetaMeanSTDFromAB(a,b,xmin,xmax):
    """Return (mean, std) of a beta(a, b) distribution rescaled to [xmin, xmax]."""
    total = a + b
    mu = (xmin*b + xmax*a)/total
    std = sqrt((a*b)*(xmax-xmin)**2/(total**2*(1+total)))
    return mu, std
| gpl-3.0 |
ifduyue/sentry | src/sentry/south_migrations/0013_auto__add_messagecountbyminute__add_unique_messagecountbyminute_group_.py | 5 | 11565 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the MessageCountByMinute and MessageFilterValue tables with
        their composite uniqueness constraints (South schema migration)."""
        # Adding model 'MessageCountByMinute'
        db.create_table(
            'sentry_messagecountbyminute', (
                (
                    'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
                        primary_key=True
                    )
                ), (
                    'group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(
                        to=orm['sentry.GroupedMessage']
                    )
                ), ('date', self.gf('django.db.models.fields.DateTimeField')()),
                ('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            )
        )
        db.send_create_signal('sentry', ['MessageCountByMinute'])
        # Adding unique constraint on 'MessageCountByMinute', fields ['group', 'date']
        db.create_unique('sentry_messagecountbyminute', ['group_id', 'date'])
        # Adding model 'MessageFilterValue'
        db.create_table(
            'sentry_messagefiltervalue', (
                (
                    'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
                        primary_key=True
                    )
                ), (
                    'group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(
                        to=orm['sentry.GroupedMessage']
                    )
                ),
                ('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
                ('key', self.gf('django.db.models.fields.CharField')(max_length=32)),
                ('value', self.gf('django.db.models.fields.CharField')(max_length=200)),
            )
        )
        db.send_create_signal('sentry', ['MessageFilterValue'])
        # Adding unique constraint on 'MessageFilterValue', fields ['key', 'value', 'group']
        db.create_unique('sentry_messagefiltervalue', ['key', 'value', 'group_id'])
    def backwards(self, orm):
        """Reverse of forwards(): drop the unique constraints first, then the
        two tables."""
        # Removing unique constraint on 'MessageFilterValue', fields ['key', 'value', 'group']
        db.delete_unique('sentry_messagefiltervalue', ['key', 'value', 'group_id'])
        # Removing unique constraint on 'MessageCountByMinute', fields ['group', 'date']
        db.delete_unique('sentry_messagecountbyminute', ['group_id', 'date'])
        # Deleting model 'MessageCountByMinute'
        db.delete_table('sentry_messagecountbyminute')
        # Deleting model 'MessageFilterValue'
        db.delete_table('sentry_messagefiltervalue')
models = {
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.groupedmessage': {
'Meta': {
'unique_together': "(('logger', 'view', 'checksum'),)",
'object_name': 'GroupedMessage'
},
'checksum': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'view': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.message': {
'Meta': {
'object_name': 'Message'
},
'checksum': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'message_set'",
'null': 'True',
'to': "orm['sentry.GroupedMessage']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'message_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'server_name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'db_index': 'True'
}),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'view': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.messagecountbyminute': {
'Meta': {
'unique_together': "(('group', 'date'),)",
'object_name': 'MessageCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.GroupedMessage']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.messagefiltervalue': {
'Meta': {
'unique_together': "(('key', 'value', 'group'),)",
'object_name': 'MessageFilterValue'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.GroupedMessage']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.messageindex': {
'Meta': {
'unique_together': "(('column', 'value', 'object_id'),)",
'object_name': 'MessageIndex'
},
'column': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
brownian/frescobaldi | frescobaldi_app/objecteditor/defineoffset.py | 3 | 3424 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Finds out which type of LilyPond object the offset will be applied to
using ly.music, stores this data and creates and inserts an override command.
"""
from PyQt5 import QtGui
import documentinfo
import lydocument
import reformat
class DefineOffset:
    """Work out which LilyPond grob an offset applies to and insert the
    matching ``\\once \\override ... extra-offset`` command into the document."""
    def __init__(self, doc):
        self.doc = doc
        self.docinfo = documentinfo.music(doc)
        # Filled in later by getCurrentLilyObject() / item2object().
        self.node = None
        self.lilyObject = None
        self.lilyContext = ""
        self.pos = 0
    def getCurrentLilyObject(self, cursor):
        """Resolve the textedit cursor to the LilyPond grob name being edited."""
        lycur = lydocument.cursor(cursor)
        self.pos = lycur.start
        self.node = self.docinfo.node(self.pos)
        # Prefer the first music child of the node; fall back to the node itself.
        first_child = next(iter(self.docinfo.iter_music(self.node)), None)
        target = self.node if first_child is None else first_child
        return self.item2object(target.__class__.__name__)
    def item2object(self, item):
        """Translate a ly.music node class name into a LilyPond grob name.

        Also remembers the grob (and its context, when one applies) on the
        instance for later use by createOffsetOverride().
        """
        mapping = {
            "String": {"GrobType": "TextScript"},
            "Markup": {"GrobType": "TextScript"},
            "Tempo": {"GrobType": "MetronomeMark", "Context": "Score"},
            "Articulation": {"GrobType": "Script"},
        }
        obj = mapping.get(item, {"GrobType": "still testing!"})
        self.lilyObject = obj["GrobType"]
        if "Context" in obj:
            self.lilyContext = obj["Context"]
        return obj["GrobType"]
    def insertOverride(self, x, y):
        """Insert the override command at the start of the current line."""
        ldoc = lydocument.Document(self.doc)
        insert_at = ldoc.block(self.pos).position()
        cur = QtGui.QTextCursor(self.doc)
        cur.setPosition(insert_at)
        cur.beginEditBlock()
        cur.insertText(self.createOffsetOverride(x, y))
        cur.insertBlock()
        cur.endEditBlock()
        reformat.reformat(cur)
    def createOffsetOverride(self, x, y):
        """Return the ``\\once \\override`` command string for offset (x, y)."""
        target = self.lilyContext
        if target:
            target += "."
        target += self.lilyObject
        return (r"\once \override {0}.extra-offset = #'({1:.2f} . {2:.2f})"
                .format(target, x, y))
| gpl-2.0 |
kvar/ansible | lib/ansible/modules/cloud/docker/docker_host_info.py | 14 | 11807 | #!/usr/bin/python
#
# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_host_info
short_description: Retrieves facts about docker host and lists of objects of the services.
description:
- Retrieves facts about a docker host.
- Essentially returns the output of C(docker system info).
- The module also allows to list object names for containers, images, networks and volumes.
It also allows to query information on disk usage.
- The output differs depending on API version of the docker daemon.
- If the docker daemon cannot be contacted or does not meet the API version requirements,
the module will fail.
version_added: "2.8"
options:
containers:
description:
- Whether to list containers.
type: bool
default: no
containers_filters:
description:
      - A dictionary of filter values used for selecting containers to list.
- "For example, C(until: 24h)."
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
for more information on possible filters.
type: dict
images:
description:
- Whether to list images.
type: bool
default: no
images_filters:
description:
      - A dictionary of filter values used for selecting images to list.
- "For example, C(dangling: true)."
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
for more information on possible filters.
type: dict
networks:
description:
- Whether to list networks.
type: bool
default: no
networks_filters:
description:
      - A dictionary of filter values used for selecting networks to list.
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
for more information on possible filters.
type: dict
volumes:
description:
- Whether to list volumes.
type: bool
default: no
volumes_filters:
description:
      - A dictionary of filter values used for selecting volumes to list.
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
for more information on possible filters.
type: dict
disk_usage:
description:
- Summary information on used disk space by all Docker layers.
- The output is a sum of images, volumes, containers and build cache.
type: bool
default: no
verbose_output:
description:
- When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes)
then output will contain verbose information about objects matching the full output of API method.
For details see the documentation of your version of Docker API at L(https://docs.docker.com/engine/api/).
- The verbose output in this module contains only subset of information returned by I(_info) module
for each type of the objects.
type: bool
default: no
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
author:
- Piotr Wojciechowski (@WojciechowskiPiotr)
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "Docker API >= 1.21"
'''
EXAMPLES = '''
- name: Get info on docker host
docker_host_info:
register: result
- name: Get info on docker host and list images
docker_host_info:
images: yes
register: result
- name: Get info on docker host and list images matching the filter
docker_host_info:
images: yes
images_filters:
label: "mylabel"
register: result
- name: Get info on docker host and verbose list images
docker_host_info:
images: yes
verbose_output: yes
register: result
- name: Get info on docker host and used disk space
docker_host_info:
disk_usage: yes
register: result
- debug:
var: result.host_info
'''
RETURN = '''
can_talk_to_docker:
description:
- Will be C(true) if the module can talk to the docker daemon.
returned: both on success and on error
type: bool
host_info:
description:
- Facts representing the basic state of the docker host. Matches the C(docker system info) output.
returned: always
type: dict
volumes:
description:
- List of dict objects containing the basic information about each volume.
Keys matches the C(docker volume ls) output unless I(verbose_output=yes).
See description for I(verbose_output).
returned: When I(volumes) is C(yes)
type: list
elements: dict
networks:
description:
- List of dict objects containing the basic information about each network.
Keys matches the C(docker network ls) output unless I(verbose_output=yes).
See description for I(verbose_output).
returned: When I(networks) is C(yes)
type: list
elements: dict
containers:
description:
- List of dict objects containing the basic information about each container.
Keys matches the C(docker container ls) output unless I(verbose_output=yes).
See description for I(verbose_output).
returned: When I(containers) is C(yes)
type: list
elements: dict
images:
description:
- List of dict objects containing the basic information about each image.
Keys matches the C(docker image ls) output unless I(verbose_output=yes).
See description for I(verbose_output).
returned: When I(images) is C(yes)
type: list
elements: dict
disk_usage:
description:
- Information on summary disk usage by images, containers and volumes on docker host
unless I(verbose_output=yes). See description for I(verbose_output).
returned: When I(disk_usage) is C(yes)
type: dict
'''
import traceback
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
DockerBaseClass,
RequestException,
)
from ansible.module_utils._text import to_native
try:
from docker.errors import DockerException, APIError
except ImportError:
# Missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.docker.common import clean_dict_booleans_for_docker_api
class DockerHostManager(DockerBaseClass):
    """Gather docker host facts and, on request, lists of containers,
    images, networks and volumes, storing everything in *results*."""
    def __init__(self, client, results):
        """Run all requested queries immediately.

        client: a connected AnsibleDockerClient.
        results: dict filled in place with the collected facts.
        """
        super(DockerHostManager, self).__init__()
        self.client = client
        self.results = results
        self.verbose_output = self.client.module.params['verbose_output']
        listed_objects = ['volumes', 'networks', 'containers', 'images']
        self.results['host_info'] = self.get_docker_host_info()
        if self.client.module.params['disk_usage']:
            self.results['disk_usage'] = self.get_docker_disk_usage_facts()
        # For each object kind the user asked for, apply the matching
        # "<kind>_filters" option and store the listing under the kind's name.
        for docker_object in listed_objects:
            if self.client.module.params[docker_object]:
                returned_name = docker_object
                filter_name = docker_object + "_filters"
                filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
                self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
    def get_docker_host_info(self):
        """Return the `docker system info` payload; fail the module on API errors."""
        try:
            return self.client.info()
        except APIError as exc:
            self.client.fail("Error inspecting docker host: %s" % to_native(exc))
    def get_docker_disk_usage_facts(self):
        """Return `docker system df` data (full payload only in verbose mode)."""
        try:
            if self.verbose_output:
                return self.client.df()
            else:
                return dict(LayersSize=self.client.df()['LayersSize'])
        except APIError as exc:
            self.client.fail("Error inspecting docker host: %s" % to_native(exc))
    def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
        """List docker objects of one kind.

        In non-verbose mode each item is trimmed down to the `docker ... ls`
        CLI columns; in verbose mode the raw API payload is returned.
        """
        items = None
        items_list = []
        # Keys kept in non-verbose mode, mirroring the CLI listing columns.
        header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
        header_volumes = ['Driver', 'Name']
        header_images = ['Id', 'RepoTags', 'Created', 'Size']
        header_networks = ['Id', 'Driver', 'Name', 'Scope']
        filter_arg = dict()
        if filters:
            filter_arg['filters'] = filters
        try:
            if docker_object == 'containers':
                items = self.client.containers(**filter_arg)
            elif docker_object == 'networks':
                items = self.client.networks(**filter_arg)
            elif docker_object == 'images':
                items = self.client.images(**filter_arg)
            elif docker_object == 'volumes':
                items = self.client.volumes(**filter_arg)
        except APIError as exc:
            self.client.fail("Error inspecting docker host for object '%s': %s" %
                             (docker_object, to_native(exc)))
        # The volumes endpoint wraps its list in a {'Volumes': [...]} envelope.
        if self.verbose_output:
            if docker_object != 'volumes':
                return items
            else:
                return items['Volumes']
        if docker_object == 'volumes':
            items = items['Volumes']
        for item in items:
            item_record = dict()
            if docker_object == 'containers':
                for key in header_containers:
                    item_record[key] = item.get(key)
            elif docker_object == 'networks':
                for key in header_networks:
                    item_record[key] = item.get(key)
            elif docker_object == 'images':
                for key in header_images:
                    item_record[key] = item.get(key)
            elif docker_object == 'volumes':
                for key in header_volumes:
                    item_record[key] = item.get(key)
            items_list.append(item_record)
        return items_list
def main():
    """Module entry point: declare options, query the docker daemon, exit with facts."""
    argument_spec = dict(
        containers=dict(type='bool', default=False),
        containers_filters=dict(type='dict'),
        images=dict(type='bool', default=False),
        images_filters=dict(type='dict'),
        networks=dict(type='bool', default=False),
        networks_filters=dict(type='dict'),
        volumes=dict(type='bool', default=False),
        volumes_filters=dict(type='dict'),
        disk_usage=dict(type='bool', default=False),
        verbose_output=dict(type='bool', default=False),
    )

    # Per-option minimum Docker SDK for Python versions. The key must match
    # the option name in argument_spec exactly: the previous 'network_filters'
    # (singular) spelling meant the version requirement for networks_filters
    # was never enforced.
    option_minimal_versions = dict(
        networks_filters=dict(docker_py_version='2.0.2'),
        disk_usage=dict(docker_py_version='2.2.0'),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='1.10.0',
        min_docker_api_version='1.21',
        option_minimal_versions=option_minimal_versions,
        fail_results=dict(
            can_talk_to_docker=False,
        ),
    )
    # Constructing the client succeeded, so the daemon is reachable.
    client.fail_results['can_talk_to_docker'] = True

    try:
        results = dict(
            changed=False,
        )
        DockerHostManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
EliasTouil/simpleBlog | simpleBlog/Lib/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py | 360 | 2852 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
# Use the stdlib logging.NullHandler where available; otherwise fall back to
# a local no-op handler so importing urllib3 never triggers the
# "No handler found" warning for applications that did not configure logging.
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        # Minimal no-op handler: silently discard every record.
        def emit(self, record):
            pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.16'
# Names exported for "from urllib3 import *".
__all__ = (
    'HTTPConnectionPool',
    'HTTPSConnectionPool',
    'PoolManager',
    'ProxyManager',
    'HTTPResponse',
    'Retry',
    'Timeout',
    'add_stderr_logger',
    'connection_from_url',
    'disable_warnings',
    'encode_multipart_formdata',
    'get_host',
    'make_headers',
    'proxy_from_url',
)
# Attach the no-op handler so library users who have not configured logging
# never see "No handler found" warnings.
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """Attach a StreamHandler (stderr) to this package's logger.

    Handy while debugging. The freshly added handler is returned so the
    caller can later remove or reconfigure it.
    """
    # Living in this __init__.py keeps __name__ correct even when urllib3
    # is vendored inside another distribution.
    pkg_logger = logging.getLogger(__name__)
    stderr_handler = logging.StreamHandler()
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    stderr_handler.setFormatter(fmt)
    pkg_logger.addHandler(stderr_handler)
    pkg_logger.setLevel(level)
    pkg_logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return stderr_handler
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
                      append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
    """Silence every urllib3 warning of *category* (and its subclasses).

    By default all HTTPWarning-derived warnings are suppressed; pass a
    narrower category to be selective.
    """
    warnings.simplefilter('ignore', category)
| gpl-3.0 |
2014c2g7/c2g7 | wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/test/test_result.py | 788 | 19069 | import io
import sys
import textwrap
from test import support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
    """Exercise the bookkeeping API of unittest.TestResult directly."""
    # Note: there are not separate tests for TestResult.wasSuccessful(),
    # TestResult.errors, TestResult.failures, TestResult.testsRun or
    # TestResult.shouldStop because these only have meaning in terms of
    # other TestResult methods.
    #
    # Accordingly, tests for the aforenamed attributes are incorporated
    # in with the tests for the defining methods.
    ################################################################
    def test_init(self):
        result = unittest.TestResult()
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(result.shouldStop, False)
        self.assertIsNone(result._stdout_buffer)
        self.assertIsNone(result._stderr_buffer)
    # "This method can be called to signal that the set of tests being
    # run should be aborted by setting the TestResult's shouldStop
    # attribute to True."
    def test_stop(self):
        result = unittest.TestResult()
        result.stop()
        self.assertEqual(result.shouldStop, True)
    # "Called when the test case test is about to be run. The default
    # implementation simply increments the instance's testsRun counter."
    def test_startTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        result.stopTest(test)
    # "Called after the test case test has been executed, regardless of
    # the outcome. The default implementation does nothing."
    def test_stopTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        result.stopTest(test)
        # Same tests as above; make sure nothing has changed
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
    # "Called before and after tests are run. The default implementation does nothing."
    def test_startTestRun_stopTestRun(self):
        result = unittest.TestResult()
        result.startTestRun()
        result.stopTestRun()
    # "addSuccess(test)"
    # ...
    # "Called when the test case test succeeds"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addSuccess(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        result.addSuccess(test)
        result.stopTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
    # "addFailure(test, err)"
    # ...
    # "Called when the test case test signals a failure. err is a tuple of
    # the form returned by sys.exc_info(): (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addFailure(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        try:
            test.fail("foo")
        except:
            exc_info_tuple = sys.exc_info()
        result = unittest.TestResult()
        result.startTest(test)
        result.addFailure(test, exc_info_tuple)
        result.stopTest(test)
        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 1)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        test_case, formatted_exc = result.failures[0]
        self.assertTrue(test_case is test)
        self.assertIsInstance(formatted_exc, str)
    # "addError(test, err)"
    # ...
    # "Called when the test case test raises an unexpected exception err
    # is a tuple of the form returned by sys.exc_info():
    # (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addError(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        try:
            raise TypeError()
        except:
            exc_info_tuple = sys.exc_info()
        result = unittest.TestResult()
        result.startTest(test)
        result.addError(test, exc_info_tuple)
        result.stopTest(test)
        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        test_case, formatted_exc = result.errors[0]
        self.assertTrue(test_case is test)
        self.assertIsInstance(formatted_exc, str)
    def testGetDescriptionWithoutDocstring(self):
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
                'testGetDescriptionWithoutDocstring (' + __name__ +
                '.Test_TestResult)')
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testGetDescriptionWithOneLineDocstring(self):
        """Tests getDescription() for a method with a docstring."""
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
               ('testGetDescriptionWithOneLineDocstring '
                '(' + __name__ + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a docstring.'))
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testGetDescriptionWithMultiLineDocstring(self):
        """Tests getDescription() for a method with a longer docstring.
        The second line of the docstring.
        """
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
               ('testGetDescriptionWithMultiLineDocstring '
                '(' + __name__ + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a longer '
                'docstring.'))
    def testStackFrameTrimming(self):
        class Frame(object):
            class tb_frame(object):
                f_globals = {}
        result = unittest.TestResult()
        self.assertFalse(result._is_relevant_tb_level(Frame))
        Frame.tb_frame.f_globals['__unittest'] = True
        self.assertTrue(result._is_relevant_tb_level(Frame))
    def testFailFast(self):
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addError(None, None)
        self.assertTrue(result.shouldStop)
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addFailure(None, None)
        self.assertTrue(result.shouldStop)
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addUnexpectedSuccess(None)
        self.assertTrue(result.shouldStop)
    def testFailFastSetByRunner(self):
        runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
        def test(result):
            self.assertTrue(result.failfast)
        result = runner.run(test)
# Build an "old style" TestResult class (as it looked before the add* and
# buffering APIs existed) by stripping the modern methods from a copy of
# TestResult's class dict and substituting a minimal __init__.
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
          '__init__'):
    del classDict[m]
def __init__(self, stream=None, descriptions=None, verbosity=None):
    self.failures = []
    self.errors = []
    self.testsRun = 0
    self.shouldStop = False
    self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
    """Check that legacy TestResult implementations still work (with warnings)."""
    def assertOldResultWarning(self, test, failures):
        with support.check_warnings(("TestResult has no add.+ method,",
                                     RuntimeWarning)):
            result = OldResult()
            test.run(result)
            self.assertEqual(len(result.failures), failures)
    def testOldTestResult(self):
        class Test(unittest.TestCase):
            def testSkip(self):
                self.skipTest('foobar')
            @unittest.expectedFailure
            def testExpectedFail(self):
                raise TypeError
            @unittest.expectedFailure
            def testUnexpectedSuccess(self):
                pass
        for test_name, should_pass in (('testSkip', True),
                                       ('testExpectedFail', True),
                                       ('testUnexpectedSuccess', False)):
            test = Test(test_name)
            self.assertOldResultWarning(test, int(not should_pass))
    def testOldTestTesultSetup(self):
        class Test(unittest.TestCase):
            def setUp(self):
                self.skipTest('no reason')
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testOldTestResultClass(self):
        @unittest.skip('no reason')
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testOldResultWithRunner(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        runner = unittest.TextTestRunner(resultclass=OldResult,
                                         stream=io.StringIO())
        # This will raise an exception if TextTestRunner can't handle old
        # test result objects
        runner.run(Test('testFoo'))
class MockTraceback(object):
    """Stand-in for the traceback module: always formats to a fixed marker."""
    @staticmethod
    def format_exception(*_):
        # The real signature is (etype, value, tb); the buffering tests only
        # need a deterministic, recognisable placeholder line.
        marker = 'A traceback'
        return [marker]
def restore_traceback():
    """Undo the MockTraceback patch: point unittest.result at the real module."""
    unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
    """Check TestResult's stdout/stderr capturing when result.buffer is set."""
    def setUp(self):
        self._real_out = sys.stdout
        self._real_err = sys.stderr
    def tearDown(self):
        # Always restore the real streams, even if a test replaced them.
        sys.stdout = self._real_out
        sys.stderr = self._real_err
    def testBufferOutputOff(self):
        real_out = self._real_out
        real_err = self._real_err
        result = unittest.TestResult()
        self.assertFalse(result.buffer)
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
        result.startTest(self)
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
    def testBufferOutputStartTestAddSuccess(self):
        real_out = self._real_out
        real_err = self._real_err
        result = unittest.TestResult()
        self.assertFalse(result.buffer)
        result.buffer = True
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
        result.startTest(self)
        self.assertIsNot(real_out, sys.stdout)
        self.assertIsNot(real_err, sys.stderr)
        self.assertIsInstance(sys.stdout, io.StringIO)
        self.assertIsInstance(sys.stderr, io.StringIO)
        self.assertIsNot(sys.stdout, sys.stderr)
        out_stream = sys.stdout
        err_stream = sys.stderr
        result._original_stdout = io.StringIO()
        result._original_stderr = io.StringIO()
        print('foo')
        print('bar', file=sys.stderr)
        self.assertEqual(out_stream.getvalue(), 'foo\n')
        self.assertEqual(err_stream.getvalue(), 'bar\n')
        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')
        result.addSuccess(self)
        result.stopTest(self)
        self.assertIs(sys.stdout, result._original_stdout)
        self.assertIs(sys.stderr, result._original_stderr)
        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')
        self.assertEqual(out_stream.getvalue(), '')
        self.assertEqual(err_stream.getvalue(), '')
    def getStartedResult(self):
        # Helper: a buffering TestResult that has already started this test.
        result = unittest.TestResult()
        result.buffer = True
        result.startTest(self)
        return result
    def testBufferOutputAddErrorOrFailure(self):
        unittest.result.traceback = MockTraceback
        self.addCleanup(restore_traceback)
        for message_attr, add_attr, include_error in [
            ('errors', 'addError', True),
            ('failures', 'addFailure', False),
            ('errors', 'addError', True),
            ('failures', 'addFailure', False)
        ]:
            result = self.getStartedResult()
            buffered_out = sys.stdout
            buffered_err = sys.stderr
            result._original_stdout = io.StringIO()
            result._original_stderr = io.StringIO()
            print('foo', file=sys.stdout)
            if include_error:
                print('bar', file=sys.stderr)
            addFunction = getattr(result, add_attr)
            addFunction(self, (None, None, None))
            result.stopTest(self)
            result_list = getattr(result, message_attr)
            self.assertEqual(len(result_list), 1)
            test, message = result_list[0]
            expectedOutMessage = textwrap.dedent("""
                Stdout:
                foo
            """)
            expectedErrMessage = ''
            if include_error:
                expectedErrMessage = textwrap.dedent("""
                Stderr:
                bar
            """)
            expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)
            self.assertIs(test, self)
            self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
            self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
            self.assertMultiLineEqual(message, expectedFullMessage)
    def testBufferSetupClass(self):
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferTearDownClass(self):
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            @classmethod
            def tearDownClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferSetUpModule(self):
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                1/0
        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferTearDownModule(self):
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def tearDownModule():
                1/0
        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
aaldaber/owid-grapher | importer/edstats_importer.py | 1 | 48292 | import sys
import os
import hashlib
import json
import logging
import requests
import unidecode
import shutil
import time
import zipfile
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import grapher_admin.wsgi
from openpyxl import load_workbook
from grapher_admin.models import Entity, DatasetSubcategory, DatasetCategory, Dataset, Source, Variable, VariableType, DataValue, ChartDimension
from importer.models import ImportHistory, AdditionalCountryInfo
from country_name_tool.models import CountryName
from django.conf import settings
from django.db import connection, transaction
from django.utils import timezone
from django.urls import reverse
from grapher_admin.views import write_dataset_csv
# we will use the file checksum to check if the downloaded file has changed since we last saw it
def file_checksum(filename, blocksize=2**20):
    """Return the hex MD5 digest of *filename*, read in blocksize-byte chunks."""
    digest = hashlib.md5()
    with open(filename, "rb") as fh:
        # iter() with a sentinel stops as soon as read() yields b"" (EOF).
        for chunk in iter(lambda: fh.read(blocksize), b""):
            digest.update(chunk)
    return digest.hexdigest()
def short_unit_extract(unit: str):
    """Derive a short unit symbol (e.g. '$', '%', 'hectares') from a unit name.

    Returns None when no sensible short form can be found or when *unit*
    is empty/None.
    """
    currency_and_percent = ['$', '£', '€', '%']  # common short symbols
    if not unit:
        return None
    if ' per ' in unit:
        # "X per Y" units shorten to X (or to a symbol contained in X).
        prefix = unit.split(' per ')[0]
        symbol = next((s for s in currency_and_percent if s in prefix), None)
        return prefix if symbol is None else symbol
    symbol = next((s for s in currency_and_percent if s in unit), None)
    if symbol is not None:
        return symbol
    if 'percentage' in unit or 'percent' in unit.lower():
        return '%'
    if len(unit) < 9:  # short enough to use verbatim (length of 'hectares')
        return unit
    return None
# Template description for every Source row created below; 'additionalInfo'
# and 'dataPublisherSource' are filled in per-indicator during the import.
source_description = {
    'dataPublishedBy': "World Bank EdStats",
    'link': "https://data.worldbank.org/data-catalog/ed-stats",
    'retrievedDate': timezone.now().strftime("%d-%B-%y")
}

edstats_zip_file_url = 'http://databank.worldbank.org/data/download/EdStats_excel.zip'
edstats_downloads_save_location = settings.BASE_DIR + '/data/edstats_downloads/'

# create a directory for holding the downloads
# if the directory exists, delete it and recreate it
if not os.path.exists(edstats_downloads_save_location):
    os.makedirs(edstats_downloads_save_location)
#else:
#    shutil.rmtree(edstats_downloads_save_location)
#    os.makedirs(edstats_downloads_save_location)

logger = logging.getLogger('importer')
start_time = time.time()

logger.info("Getting the zip file")
request_header = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
# stream=True so the body is copied straight from the socket to disk below
r = requests.get(edstats_zip_file_url, stream=True, headers=request_header)
if r.ok:
    with open(edstats_downloads_save_location + 'edstats.zip', 'wb') as out_file:
        shutil.copyfileobj(r.raw, out_file)
    logger.info("Saved the zip file to disk.")
    z = zipfile.ZipFile(edstats_downloads_save_location + 'edstats.zip')
    excel_filename = edstats_downloads_save_location + z.namelist()[0]  # there should be only one file inside the zipfile, so we will load that one
    z.extractall(edstats_downloads_save_location)
    r = None  # we do not need the request anymore
    logger.info("Successfully extracted the zip file")
else:
    logger.error("The file could not be downloaded. Stopping the script...")
    sys.exit("Could not download file.")

edstats_category_name_in_db = 'World Bank EdStats'  # set the name of the root category of all data that will be imported by this script

import_history = ImportHistory.objects.filter(import_type='edstats')

#excel_filename = edstats_downloads_save_location + "WDIEXCEL.xlsx"
# All database writes happen inside one transaction so a failed import rolls back cleanly.
with transaction.atomic():
    # if edstats imports were never performed
    if not import_history:
        logger.info("This is the very first EdStats data import.")

        wb = load_workbook(excel_filename, read_only=True)

        series_ws = wb['Series']
        data_ws = wb['Data']
        country_ws = wb['Country']

        column_number = 0  # this will be reset to 0 on each new row
        row_number = 0  # this will be reset to 0 if we switch to another worksheet, or start reading the worksheet from the beginning one more time

        global_cat = {}  # global catalog of indicators

        # data in the worksheets is not loaded into memory at once, that causes RAM to quickly fill up
        # instead, we go through each row and cell one-by-one, looking at each piece of data separately
        # this has the disadvantage of needing to traverse the worksheet several times, if we need to look up some rows/cells again

        # --- Series worksheet: build the indicator catalog (keyed by upper-cased indicator code) ---
        for row in series_ws.rows:
            row_number += 1
            for cell in row:
                if row_number > 1:  # row 1 is the header row
                    column_number += 1
                    if column_number == 1:
                        global_cat[cell.value.upper().strip()] = {}
                        indicatordict = global_cat[cell.value.upper().strip()]
                    if column_number == 2:
                        indicatordict['category'] = cell.value
                    if column_number == 3:
                        indicatordict['name'] = cell.value
                    if column_number == 5:
                        indicatordict['description'] = cell.value
                    if column_number == 6:
                        if cell.value:
                            indicatordict['unitofmeasure'] = cell.value
                        else:
                            if '(' not in indicatordict['name']:
                                indicatordict['unitofmeasure'] = ''
                            else:
                                # fall back to the text inside the last '(...)' of the indicator name
                                indicatordict['unitofmeasure'] = indicatordict['name'][
                                                                 indicatordict['name'].rfind('(') + 1:indicatordict[
                                                                     'name'].rfind(')')]
                    if column_number == 11:
                        if cell.value:
                            indicatordict['limitations'] = cell.value
                        else:
                            indicatordict['limitations'] = ''
                    if column_number == 12:
                        if cell.value:
                            indicatordict['sourcenotes'] = cell.value
                        else:
                            indicatordict['sourcenotes'] = ''
                    if column_number == 13:
                        if cell.value:
                            indicatordict['comments'] = cell.value
                        else:
                            indicatordict['comments'] = ''
                    if column_number == 14:
                        indicatordict['source'] = cell.value
                    if column_number == 15:
                        if cell.value:
                            indicatordict['concept'] = cell.value
                        else:
                            indicatordict['concept'] = ''
                    if column_number == 17:
                        if cell.value:
                            indicatordict['sourcelinks'] = cell.value
                        else:
                            indicatordict['sourcelinks'] = ''
                    if column_number == 18:
                        if cell.value:
                            indicatordict['weblinks'] = cell.value
                        else:
                            indicatordict['weblinks'] = ''
                    indicatordict['saved'] = False
            column_number = 0

        category_vars = {}  # categories and their corresponding variables
        for key, value in global_cat.items():
            if value['category'] in category_vars:
                category_vars[value['category']].append(key)
            else:
                category_vars[value['category']] = []
                category_vars[value['category']].append(key)

        existing_categories = DatasetCategory.objects.values('name')
        existing_categories_list = {item['name'] for item in existing_categories}

        if edstats_category_name_in_db not in existing_categories_list:
            the_category = DatasetCategory(name=edstats_category_name_in_db, fetcher_autocreated=True)
            the_category.save()
            logger.info("Inserting a category %s." % edstats_category_name_in_db.encode('utf8'))
        else:
            the_category = DatasetCategory.objects.get(name=edstats_category_name_in_db)

        existing_subcategories = DatasetSubcategory.objects.filter(categoryId=the_category.pk).values('name')
        existing_subcategories_list = {item['name'] for item in existing_subcategories}

        edstats_categories_list = []

        for key, value in category_vars.items():
            edstats_categories_list.append(key)
            if key not in existing_subcategories_list:
                the_subcategory = DatasetSubcategory(name=key, categoryId=the_category)
                the_subcategory.save()
                logger.info("Inserting a subcategory %s." % key.encode('utf8'))

        existing_entities = Entity.objects.values('name')
        existing_entities_list = {item['name'] for item in existing_entities}

        country_tool_names = CountryName.objects.all()
        country_tool_names_dict = {}
        for each in country_tool_names:
            country_tool_names_dict[each.country_name.lower()] = each.owid_country

        country_name_entity_ref = {}  # this dict will hold the country names from excel and the appropriate entity object (this is used when saving the variables and their values)

        # --- Country worksheet: record per-country metadata and resolve/create Entity rows ---
        row_number = 0
        for row in country_ws.rows:
            row_number += 1
            for cell in row:
                if row_number > 1:
                    column_number += 1
                    if column_number == 1:
                        country_code = cell.value
                    if column_number == 3:
                        country_name = cell.value
                    if column_number == 7:
                        country_special_notes = cell.value
                    if column_number == 8:
                        country_region = cell.value
                    if column_number == 9:
                        country_income_group = cell.value
                    if column_number == 24:
                        country_latest_census = cell.value
                    if column_number == 25:
                        country_latest_survey = cell.value
                    if column_number == 26:
                        country_recent_income_source = cell.value
                    if column_number == 31:  # last column of interest: flush the collected row
                        entity_info = AdditionalCountryInfo()
                        entity_info.country_code = country_code
                        entity_info.country_name = country_name
                        entity_info.country_wb_region = country_region
                        entity_info.country_wb_income_group = country_income_group
                        entity_info.country_special_notes = country_special_notes
                        entity_info.country_latest_census = country_latest_census
                        entity_info.country_latest_survey = country_latest_survey
                        entity_info.country_recent_income_source = country_recent_income_source
                        entity_info.dataset = 'edstats'
                        entity_info.save()
                        # prefer the country-name tool's canonical OWID name, then an existing entity, else create one
                        if country_tool_names_dict.get(unidecode.unidecode(country_name.lower()), 0):
                            newentity = Entity.objects.get(name=country_tool_names_dict[unidecode.unidecode(country_name.lower())].owid_name)
                        elif country_name in existing_entities_list:
                            newentity = Entity.objects.get(name=country_name)
                        else:
                            newentity = Entity(name=country_name, validated=False)
                            newentity.save()
                            logger.info("Inserting a country %s." % newentity.name.encode('utf8'))
                        country_name_entity_ref[country_code] = newentity
            column_number = 0

        # this block of code is needed to insert the country British Virgin Islands with the code VGB
        # without inserting this country name, the script will throw an error when reading the data values
        # the EdStats file seems to be missing this country name and info in the Country worksheet
        country_name = 'British Virgin Islands'
        country_code = 'VGB'
        if country_tool_names_dict.get(unidecode.unidecode(country_name.lower()), 0):
            newentity = Entity.objects.get(
                name=country_tool_names_dict[unidecode.unidecode(country_name.lower())].owid_name)
        elif country_name in existing_entities_list:
            newentity = Entity.objects.get(name=country_name)
        else:
            newentity = Entity(name=country_name, validated=False)
            newentity.save()
            logger.info("Inserting a country %s." % newentity.name.encode('utf8'))
        country_name_entity_ref[country_code] = newentity
        # end of VGB-related code block

        insert_string = 'INSERT into data_values (value, year, entityId, variableId) VALUES (%s, %s, %s, %s)'  # this is used for constructing the query for mass inserting to the data_values table
        data_values_tuple_list = []

        # --- Data worksheet: one pass per category, creating datasets/sources/variables and batching values ---
        datasets_list = []
        for category in edstats_categories_list:
            newdataset = Dataset(name='World Bank EdStats - ' + category,
                                 description='This is a dataset imported by the automated fetcher',
                                 namespace='edstats', categoryId=the_category,
                                 subcategoryId=DatasetSubcategory.objects.get(name=category, categoryId=the_category))
            newdataset.save()
            datasets_list.append(newdataset)
            logger.info("Inserting a dataset %s." % newdataset.name.encode('utf8'))
            row_number = 0
            columns_to_years = {}
            for row in data_ws.rows:
                row_number += 1
                data_values = []
                for cell in row:
                    if row_number == 1:
                        # header row: map year columns; non-numeric headers are skipped
                        column_number += 1
                        if cell.value:
                            try:
                                last_available_year = int(cell.value)
                                columns_to_years[column_number] = last_available_year
                                last_available_column = column_number
                            except:
                                pass
                    if row_number > 1:
                        column_number += 1
                        if column_number == 1:
                            country_name = cell.value
                        if column_number == 2:
                            country_code = cell.value
                        if column_number == 3:
                            indicator_name = cell.value
                        if column_number == 4:
                            indicator_code = cell.value.upper().strip()
                        if column_number > 4 and column_number <= last_available_column:
                            if cell.value or cell.value == 0:  # keep zeros, drop empty cells
                                data_values.append({'value': cell.value, 'year': columns_to_years[column_number]})
                        if column_number > 4 and column_number == last_available_column:  # end of the row's year columns
                            if len(data_values):
                                if indicator_code in category_vars[category]:
                                    if not global_cat[indicator_code]['saved']:
                                        # first time we see this indicator: create its Source and Variable
                                        source_description['additionalInfo'] = "Definitions and characteristics of countries and other territories: " + "https://ourworldindata.org" + reverse("serveedstatscountryinfo") + "\n"
                                        source_description['additionalInfo'] += "Limitations and exceptions:\n" + global_cat[indicator_code]['limitations'] + "\n" if global_cat[indicator_code]['limitations'] else ''
                                        source_description['additionalInfo'] += "Notes from original source:\n" + global_cat[indicator_code]['sourcenotes'] + "\n" if global_cat[indicator_code]['sourcenotes'] else ''
                                        source_description['additionalInfo'] += "General comments:\n" + global_cat[indicator_code]['comments'] + "\n" if global_cat[indicator_code]['comments'] else ''
                                        source_description['additionalInfo'] += "Statistical concept and methodology:\n" + global_cat[indicator_code]['concept'] + "\n" if global_cat[indicator_code]['concept'] else ''
                                        source_description['additionalInfo'] += "Related source links:\n" + global_cat[indicator_code]['sourcelinks'] + "\n" if global_cat[indicator_code]['sourcelinks'] else ''
                                        source_description['additionalInfo'] += "Other web links:\n" + global_cat[indicator_code]['weblinks'] + "\n" if global_cat[indicator_code]['weblinks'] else ''
                                        source_description['dataPublisherSource'] = global_cat[indicator_code]['source']
                                        newsource = Source(name='World Bank EdStats: ' + global_cat[indicator_code]['name'],
                                                           description=json.dumps(source_description),
                                                           datasetId=newdataset.pk)
                                        newsource.save()
                                        logger.info("Inserting a source %s." % newsource.name.encode('utf8'))
                                        s_unit = short_unit_extract(global_cat[indicator_code]['unitofmeasure'])
                                        newvariable = Variable(name=global_cat[indicator_code]['name'], unit=global_cat[indicator_code]['unitofmeasure'] if global_cat[indicator_code]['unitofmeasure'] else '', short_unit=s_unit, description=global_cat[indicator_code]['description'],
                                                               code=indicator_code, timespan='1970-' + str(last_available_year), datasetId=newdataset, variableTypeId=VariableType.objects.get(pk=4), sourceId=newsource)
                                        newvariable.save()
                                        logger.info("Inserting a variable %s." % newvariable.name.encode('utf8'))
                                        global_cat[indicator_code]['variable_object'] = newvariable
                                        global_cat[indicator_code]['saved'] = True
                                    else:
                                        newvariable = global_cat[indicator_code]['variable_object']
                                    for i in range(0, len(data_values)):
                                        data_values_tuple_list.append((data_values[i]['value'], data_values[i]['year'], country_name_entity_ref[country_code].pk, newvariable.pk))
                                    if len(data_values_tuple_list) > 3000:  # insert when the length of the list goes over 3000
                                        with connection.cursor() as c:
                                            c.executemany(insert_string, data_values_tuple_list)
                                        logger.info("Dumping data values...")
                                        data_values_tuple_list = []
                column_number = 0
                if row_number % 10 == 0:
                    time.sleep(0.001)  # this is done in order to not keep the CPU busy all the time, the delay after each 10th row is 1 millisecond

        if len(data_values_tuple_list):  # insert any leftover data_values
            with connection.cursor() as c:
                c.executemany(insert_string, data_values_tuple_list)
            logger.info("Dumping data values...")

        # record the import together with the zip's checksum so the next run can detect changes
        newimport = ImportHistory(import_type='edstats', import_time=timezone.now().strftime('%Y-%m-%d %H:%M:%S'),
                                  import_notes='Initial import of Edstats',
                                  import_state=json.dumps({'file_hash': file_checksum(edstats_downloads_save_location + 'edstats.zip')}))
        newimport.save()
        for dataset in datasets_list:
            write_dataset_csv(dataset.pk, dataset.name, None, 'edstats_fetcher', '')
        logger.info("Import complete.")
    else:
        # --- Update path: a previous EdStats import exists; diff the new file against the DB ---
        last_import = import_history.last()
        deleted_indicators = {}  # This is used to keep track which variables' data values were already deleted before writing new values

        # unchanged checksum means nothing to do
        if json.loads(last_import.import_state)['file_hash'] == file_checksum(edstats_downloads_save_location + 'edstats.zip'):
            logger.info('No updates available.')
            sys.exit('No updates available.')
        logger.info('New data is available.')

        available_variables = Variable.objects.filter(datasetId__in=Dataset.objects.filter(namespace='edstats'))
        available_variables_list = []
        for each in available_variables.values('code'):
            available_variables_list.append(each['code'])

        chart_dimension_vars = ChartDimension.objects.all().values('variableId').distinct()
        chart_dimension_vars_list = {item['variableId'] for item in chart_dimension_vars}
        existing_variables_ids = [item['id'] for item in available_variables.values('id')]
        existing_variables_id_code = {item['id']: item['code'] for item in available_variables.values('id', 'code')}
        existing_variables_code_id = {item['code']: item['id'] for item in available_variables.values('id', 'code')}

        vars_being_used = []  # we will not be deleting any variables that are currently being used by charts
        for each_var in existing_variables_ids:
            if each_var in chart_dimension_vars_list:
                vars_being_used.append(existing_variables_id_code[each_var])

        wb = load_workbook(excel_filename, read_only=True)

        series_ws = wb['Series']
        data_ws = wb['Data']
        country_ws = wb['Country']

        column_number = 0  # this will be reset to 0 on each new row
        row_number = 0  # this will be reset to 0 if we switch to another worksheet, or start reading the worksheet from the beginning one more time

        global_cat = {}  # global catalog of indicators

        # data in the worksheets is not loaded into memory at once, that causes RAM to quickly fill up
        # instead, we go through each row and cell one-by-one, looking at each piece of data separately
        # this has the disadvantage of needing to traverse the worksheet several times, if we need to look up some rows/cells again

        # --- Series worksheet: rebuild the indicator catalog (same layout as the first-import pass) ---
        for row in series_ws.rows:
            row_number += 1
            for cell in row:
                if row_number > 1:
                    column_number += 1
                    if column_number == 1:
                        global_cat[cell.value.upper().strip()] = {}
                        indicatordict = global_cat[cell.value.upper().strip()]
                    if column_number == 2:
                        indicatordict['category'] = cell.value
                    if column_number == 3:
                        indicatordict['name'] = cell.value
                    if column_number == 5:
                        indicatordict['description'] = cell.value
                    if column_number == 6:
                        if cell.value:
                            indicatordict['unitofmeasure'] = cell.value
                        else:
                            if '(' not in indicatordict['name']:
                                indicatordict['unitofmeasure'] = ''
                            else:
                                # fall back to the text inside the last '(...)' of the indicator name
                                indicatordict['unitofmeasure'] = indicatordict['name'][
                                                                 indicatordict['name'].rfind('(') + 1:indicatordict[
                                                                     'name'].rfind(')')]
                    if column_number == 11:
                        if cell.value:
                            indicatordict['limitations'] = cell.value
                        else:
                            indicatordict['limitations'] = ''
                    if column_number == 12:
                        if cell.value:
                            indicatordict['sourcenotes'] = cell.value
                        else:
                            indicatordict['sourcenotes'] = ''
                    if column_number == 13:
                        if cell.value:
                            indicatordict['comments'] = cell.value
                        else:
                            indicatordict['comments'] = ''
                    if column_number == 14:
                        indicatordict['source'] = cell.value
                    if column_number == 15:
                        if cell.value:
                            indicatordict['concept'] = cell.value
                        else:
                            indicatordict['concept'] = ''
                    if column_number == 17:
                        if cell.value:
                            indicatordict['sourcelinks'] = cell.value
                        else:
                            indicatordict['sourcelinks'] = ''
                    if column_number == 18:
                        if cell.value:
                            indicatordict['weblinks'] = cell.value
                        else:
                            indicatordict['weblinks'] = ''
                    indicatordict['saved'] = False
            column_number = 0

        # diff the catalog against what is already in the database
        new_variables = []
        for key, value in global_cat.items():
            new_variables.append(key)
        vars_to_add = list(set(new_variables).difference(available_variables_list))
        newly_added_vars = list(set(new_variables).difference(available_variables_list))
        vars_to_delete = list(set(available_variables_list).difference(new_variables))

        # drop variables that disappeared from the file (unless a chart uses them)
        for each in vars_to_delete:
            if each not in vars_being_used:
                logger.info("Deleting data values for the variable: %s" % each.encode('utf8'))
                while DataValue.objects.filter(variableId__pk=existing_variables_code_id[each]).first():
                    with connection.cursor() as c:  # if we don't limit the deleted values, the db might just hang
                        c.execute('DELETE FROM %s WHERE variableId = %s LIMIT 10000;' %
                                  (DataValue._meta.db_table, existing_variables_code_id[each]))
                source_object = Variable.objects.get(code=each, datasetId__in=Dataset.objects.filter(namespace='edstats')).sourceId
                Variable.objects.get(code=each, datasetId__in=Dataset.objects.filter(namespace='edstats')).delete()
                logger.info("Deleting the variable: %s" % each.encode('utf8'))
                logger.info("Deleting the source: %s" % source_object.name.encode('utf8'))
                source_object.delete()

        category_vars = {}  # categories and their corresponding variables
        for key, value in global_cat.items():
            if value['category'] in category_vars:
                category_vars[value['category']].append(key)
            else:
                category_vars[value['category']] = []
                category_vars[value['category']].append(key)

        existing_categories = DatasetCategory.objects.values('name')
        existing_categories_list = {item['name'] for item in existing_categories}

        if edstats_category_name_in_db not in existing_categories_list:
            the_category = DatasetCategory(name=edstats_category_name_in_db, fetcher_autocreated=True)
            the_category.save()
            logger.info("Inserting a category %s." % edstats_category_name_in_db.encode('utf8'))
        else:
            the_category = DatasetCategory.objects.get(name=edstats_category_name_in_db)

        existing_subcategories = DatasetSubcategory.objects.filter(categoryId=the_category).values('name')
        existing_subcategories_list = {item['name'] for item in existing_subcategories}

        edstats_categories_list = []

        for key, value in category_vars.items():
            edstats_categories_list.append(key)
            if key not in existing_subcategories_list:
                the_subcategory = DatasetSubcategory(name=key, categoryId=the_category)
                the_subcategory.save()
                logger.info("Inserting a subcategory %s." % key.encode('utf8'))

        cats_to_add = list(set(edstats_categories_list).difference(list(existing_subcategories_list)))

        existing_entities = Entity.objects.values('name')
        existing_entities_list = {item['name'] for item in existing_entities}

        country_tool_names = CountryName.objects.all()
        country_tool_names_dict = {}
        for each in country_tool_names:
            country_tool_names_dict[each.country_name.lower()] = each.owid_country

        country_name_entity_ref = {}  # this dict will hold the country names from excel and the appropriate entity object (this is used when saving the variables and their values)

        AdditionalCountryInfo.objects.filter(dataset='edstats').delete()  # We will load new additional country data now

        # --- Country worksheet: same as the first-import pass ---
        row_number = 0
        for row in country_ws.rows:
            row_number += 1
            for cell in row:
                if row_number > 1:
                    column_number += 1
                    if column_number == 1:
                        country_code = cell.value
                    if column_number == 3:
                        country_name = cell.value
                    if column_number == 7:
                        country_special_notes = cell.value
                    if column_number == 8:
                        country_region = cell.value
                    if column_number == 9:
                        country_income_group = cell.value
                    if column_number == 24:
                        country_latest_census = cell.value
                    if column_number == 25:
                        country_latest_survey = cell.value
                    if column_number == 26:
                        country_recent_income_source = cell.value
                    if column_number == 31:
                        entity_info = AdditionalCountryInfo()
                        entity_info.country_code = country_code
                        entity_info.country_name = country_name
                        entity_info.country_wb_region = country_region
                        entity_info.country_wb_income_group = country_income_group
                        entity_info.country_special_notes = country_special_notes
                        entity_info.country_latest_census = country_latest_census
                        entity_info.country_latest_survey = country_latest_survey
                        entity_info.country_recent_income_source = country_recent_income_source
                        entity_info.dataset = 'edstats'
                        entity_info.save()
                        if country_tool_names_dict.get(unidecode.unidecode(country_name.lower()), 0):
                            newentity = Entity.objects.get(name=country_tool_names_dict[unidecode.unidecode(country_name.lower())].owid_name)
                        elif country_name in existing_entities_list:
                            newentity = Entity.objects.get(name=country_name)
                        else:
                            newentity = Entity(name=country_name, validated=False)
                            newentity.save()
                            logger.info("Inserting a country %s." % newentity.name.encode('utf8'))
                        country_name_entity_ref[country_code] = newentity
            column_number = 0

        # this block of code is needed to insert the country British Virgin Islands with the code VGB
        # without inserting this country name, the script will throw an error when reading the data values
        # the EdStats file seems to be missing this country name and info in their Country worksheet
        country_name = 'British Virgin Islands'
        country_code = 'VGB'
        if country_tool_names_dict.get(unidecode.unidecode(country_name.lower()), 0):
            newentity = Entity.objects.get(
                name=country_tool_names_dict[unidecode.unidecode(country_name.lower())].owid_name)
        elif country_name in existing_entities_list:
            newentity = Entity.objects.get(name=country_name)
        else:
            newentity = Entity(name=country_name, validated=False)
            newentity.save()
            logger.info("Inserting a country %s." % newentity.name.encode('utf8'))
        country_name_entity_ref[country_code] = newentity
        # end of VGB-related code block

        insert_string = 'INSERT into data_values (value, year, entityId, variableId) VALUES (%s, %s, %s, %s)'  # this is used for constructing the query for mass inserting to the data_values table
        data_values_tuple_list = []
        total_values_tracker = 0

        # --- Data worksheet: insert new indicators, update existing ones, replace their values ---
        dataset_id_oldname_list = []
        for category in edstats_categories_list:
            if category in cats_to_add:
                newdataset = Dataset(name='World Bank EdStats - ' + category,
                                     description='This is a dataset imported by the automated fetcher',
                                     namespace='edstats', categoryId=the_category,
                                     subcategoryId=DatasetSubcategory.objects.get(name=category,
                                                                                  categoryId=the_category))
                newdataset.save()
                dataset_id_oldname_list.append({'id': newdataset.pk, 'newname': newdataset.name, 'oldname': None})
                logger.info("Inserting a dataset %s." % newdataset.name.encode('utf8'))
            else:
                newdataset = Dataset.objects.get(name='World Bank EdStats - ' + category, categoryId=DatasetCategory.objects.get(
                    name=edstats_category_name_in_db))
                dataset_id_oldname_list.append({'id': newdataset.pk, 'newname': newdataset.name, 'oldname': newdataset.name})
            row_number = 0
            columns_to_years = {}
            for row in data_ws.rows:
                row_number += 1
                data_values = []
                for cell in row:
                    if row_number == 1:
                        # header row: map year columns; non-numeric headers are skipped
                        column_number += 1
                        if cell.value:
                            try:
                                last_available_year = int(cell.value)
                                columns_to_years[column_number] = last_available_year
                                last_available_column = column_number
                            except:
                                pass
                    if row_number > 1:
                        column_number += 1
                        if column_number == 1:
                            country_name = cell.value
                        if column_number == 2:
                            country_code = cell.value
                        if column_number == 3:
                            indicator_name = cell.value
                        if column_number == 4:
                            indicator_code = cell.value.upper().strip()
                        if column_number > 4 and column_number <= last_available_column:
                            if cell.value or cell.value == 0:  # keep zeros, drop empty cells
                                data_values.append({'value': cell.value, 'year': columns_to_years[column_number]})
                        if column_number > 4 and column_number == last_available_column:  # end of the row's year columns
                            if len(data_values):
                                if indicator_code in category_vars[category]:
                                    total_values_tracker += len(data_values)
                                    if indicator_code in vars_to_add:
                                        # brand-new indicator: create its Source and Variable
                                        source_description['additionalInfo'] = "Definitions and characteristics of countries and other territories: " + "https://ourworldindata.org" + reverse("serveedstatscountryinfo") + "\n"
                                        source_description['additionalInfo'] += "Limitations and exceptions:\n" + global_cat[indicator_code]['limitations'] + "\n" if global_cat[indicator_code]['limitations'] else ''
                                        source_description['additionalInfo'] += "Notes from original source:\n" + global_cat[indicator_code]['sourcenotes'] + "\n" if global_cat[indicator_code]['sourcenotes'] else ''
                                        source_description['additionalInfo'] += "General comments:\n" + global_cat[indicator_code]['comments'] + "\n" if global_cat[indicator_code]['comments'] else ''
                                        source_description['additionalInfo'] += "Statistical concept and methodology:\n" + global_cat[indicator_code]['concept'] + "\n" if global_cat[indicator_code]['concept'] else ''
                                        source_description['additionalInfo'] += "Related source links:\n" + global_cat[indicator_code]['sourcelinks'] + "\n" if global_cat[indicator_code]['sourcelinks'] else ''
                                        source_description['additionalInfo'] += "Other web links:\n" + global_cat[indicator_code]['weblinks'] + "\n" if global_cat[indicator_code]['weblinks'] else ''
                                        source_description['dataPublisherSource'] = global_cat[indicator_code]['source']
                                        newsource = Source(name='World Bank EdStats: ' + global_cat[indicator_code]['name'],
                                                           description=json.dumps(source_description),
                                                           datasetId=newdataset.pk)
                                        newsource.save()
                                        logger.info("Inserting a source %s." % newsource.name.encode('utf8'))
                                        global_cat[indicator_code]['source_object'] = newsource
                                        s_unit = short_unit_extract(global_cat[indicator_code]['unitofmeasure'])
                                        newvariable = Variable(name=global_cat[indicator_code]['name'],
                                                               unit=global_cat[indicator_code]['unitofmeasure'] if
                                                               global_cat[indicator_code]['unitofmeasure'] else '',
                                                               short_unit=s_unit,
                                                               description=global_cat[indicator_code]['description'],
                                                               code=indicator_code,
                                                               timespan='1970-' + str(last_available_year),
                                                               datasetId=newdataset,
                                                               variableTypeId=VariableType.objects.get(pk=4),
                                                               sourceId=newsource)
                                        newvariable.save()
                                        global_cat[indicator_code]['variable_object'] = newvariable
                                        vars_to_add.remove(indicator_code)
                                        global_cat[indicator_code]['saved'] = True
                                        logger.info("Inserting a variable %s." % newvariable.name.encode('utf8'))
                                    else:
                                        if not global_cat[indicator_code]['saved']:
                                            # existing indicator seen for the first time in this run: refresh its Source and Variable metadata
                                            newsource = Source.objects.get(name='World Bank EdStats: ' + Variable.objects.get(code=indicator_code, datasetId__in=Dataset.objects.filter(namespace='edstats')).name)
                                            newsource.name = 'World Bank EdStats: ' + global_cat[indicator_code]['name']
                                            source_description['additionalInfo'] = "Definitions and characteristics of countries and other territories: " + "https://ourworldindata.org" + reverse("serveedstatscountryinfo") + "\n"
                                            source_description['additionalInfo'] += "Limitations and exceptions:\n" + global_cat[indicator_code]['limitations'] + "\n" if global_cat[indicator_code]['limitations'] else ''
                                            source_description['additionalInfo'] += "Notes from original source:\n" + global_cat[indicator_code]['sourcenotes'] + "\n" if global_cat[indicator_code]['sourcenotes'] else ''
                                            source_description['additionalInfo'] += "General comments:\n" + global_cat[indicator_code]['comments'] + "\n" if global_cat[indicator_code]['comments'] else ''
                                            source_description['additionalInfo'] += "Statistical concept and methodology:\n" + global_cat[indicator_code]['concept'] + "\n" if global_cat[indicator_code]['concept'] else ''
                                            source_description['additionalInfo'] += "Related source links:\n" + global_cat[indicator_code]['sourcelinks'] + "\n" if global_cat[indicator_code]['sourcelinks'] else ''
                                            source_description['additionalInfo'] += "Other web links:\n" + global_cat[indicator_code]['weblinks'] + "\n" if global_cat[indicator_code]['weblinks'] else ''
                                            source_description['dataPublisherSource'] = global_cat[indicator_code]['source']
                                            newsource.description=json.dumps(source_description)
                                            newsource.datasetId=newdataset.pk
                                            newsource.save()
                                            logger.info("Updating the source %s." % newsource.name.encode('utf8'))
                                            s_unit = short_unit_extract(global_cat[indicator_code]['unitofmeasure'])
                                            newvariable = Variable.objects.get(code=indicator_code, datasetId__in=Dataset.objects.filter(namespace='edstats'))
                                            newvariable.name = global_cat[indicator_code]['name']
                                            newvariable.unit=global_cat[indicator_code]['unitofmeasure'] if global_cat[indicator_code]['unitofmeasure'] else ''
                                            newvariable.short_unit = s_unit
                                            newvariable.description=global_cat[indicator_code]['description']
                                            newvariable.timespan='1970-' + str(last_available_year)
                                            newvariable.datasetId=newdataset
                                            newvariable.sourceId=newsource
                                            newvariable.save()
                                            global_cat[indicator_code]['variable_object'] = newvariable
                                            logger.info("Updating the variable %s." % newvariable.name.encode('utf8'))
                                            global_cat[indicator_code]['saved'] = True
                                        else:
                                            newvariable = global_cat[indicator_code]['variable_object']
                                    if indicator_code not in newly_added_vars:
                                        # pre-existing variable: clear its old values once before re-inserting
                                        if not deleted_indicators.get(indicator_code, 0):
                                            while DataValue.objects.filter(variableId__pk=newvariable.pk).first():
                                                with connection.cursor() as c:
                                                    c.execute(
                                                        'DELETE FROM %s WHERE variableId = %s LIMIT 10000;' %
                                                        (DataValue._meta.db_table, newvariable.pk))
                                            deleted_indicators[indicator_code] = True
                                            logger.info("Deleting data values for the variable %s." % indicator_code.encode('utf8'))
                                    for i in range(0, len(data_values)):
                                        data_values_tuple_list.append((data_values[i]['value'], data_values[i]['year'],
                                                                       country_name_entity_ref[country_code].pk,
                                                                       newvariable.pk))
                                    if len(data_values_tuple_list) > 3000:  # insert when the length of the list goes over 3000
                                        with connection.cursor() as c:
                                            c.executemany(insert_string, data_values_tuple_list)
                                        logger.info("Dumping data values...")
                                        data_values_tuple_list = []
                column_number = 0
                if row_number % 10 == 0:
                    time.sleep(0.001)  # this is done in order to not keep the CPU busy all the time, the delay after each 10th row is 1 millisecond

        if len(data_values_tuple_list):  # insert any leftover data_values
            with connection.cursor() as c:
                c.executemany(insert_string, data_values_tuple_list)
            logger.info("Dumping data values...")

        # now deleting subcategories and datasets that are empty (that don't contain any variables), if any
        all_edstats_datasets = Dataset.objects.filter(namespace='edstats')
        all_edstats_datasets_with_vars = Variable.objects.filter(datasetId__in=all_edstats_datasets).values(
            'datasetId').distinct()
        all_edstats_datasets_with_vars_dict = {item['datasetId'] for item in all_edstats_datasets_with_vars}
        for each in all_edstats_datasets:
            if each.pk not in all_edstats_datasets_with_vars_dict:
                cat_to_delete = each.subcategoryId
                logger.info("Deleting empty dataset %s." % each.name)
                logger.info("Deleting empty category %s." % cat_to_delete.name)
                each.delete()
                cat_to_delete.delete()

        # record this import with the new file checksum
        newimport = ImportHistory(import_type='edstats', import_time=timezone.now().strftime('%Y-%m-%d %H:%M:%S'),
                                  import_notes='Imported a total of %s data values.' % total_values_tracker,
                                  import_state=json.dumps(
                                      {'file_hash': file_checksum(edstats_downloads_save_location + 'edstats.zip')}))
        newimport.save()

        # now exporting csvs to the repo
        for dataset in dataset_id_oldname_list:
            write_dataset_csv(dataset['id'], dataset['newname'], dataset['oldname'], 'edstats_fetcher', '')

print("--- %s seconds ---" % (time.time() - start_time))
| mit |
grpc/grpc | examples/python/metadata/helloworld_pb2_grpc.py | 148 | 1331 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import helloworld_pb2 as helloworld__pb2
class GreeterStub(object):
  """The greeting service definition.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # One callable per RPC; unary_unary = single request message in,
    # single response message out.
    self.SayHello = channel.unary_unary(
        '/helloworld.Greeter/SayHello',
        request_serializer=helloworld__pb2.HelloRequest.SerializeToString,
        response_deserializer=helloworld__pb2.HelloReply.FromString,
        )
class GreeterServicer(object):
  """The greeting service definition.
  """

  def SayHello(self, request, context):
    """Sends a greeting
    """
    # Generated default implementation: server code subclasses this class
    # and overrides SayHello with the real handler.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
# NOTE: generated registration helper (see "DO NOT EDIT" file header).
def add_GreeterServicer_to_server(servicer, server):
  """Register a GreeterServicer implementation with a grpc.Server."""
  # Map each RPC name to a handler that deserializes the request, calls the
  # servicer method, and serializes the reply.
  rpc_method_handlers = {
      'SayHello': grpc.unary_unary_rpc_method_handler(
          servicer.SayHello,
          request_deserializer=helloworld__pb2.HelloRequest.FromString,
          response_serializer=helloworld__pb2.HelloReply.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'helloworld.Greeter', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| apache-2.0 |
DavidLP/home-assistant | homeassistant/components/config/customize.py | 8 | 1403 | """Provide configuration end points for Customize."""
from homeassistant.components.homeassistant import SERVICE_RELOAD_CORE_CONFIG
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.core import DOMAIN
import homeassistant.helpers.config_validation as cv
from . import EditKeyBasedConfigView
CONFIG_PATH = 'customize.yaml'
async def async_setup(hass):
    """Set up the Customize config API."""
    async def reload_core_config(hass):
        """post_write_hook for the config view: reload core configuration."""
        await hass.services.async_call(DOMAIN, SERVICE_RELOAD_CORE_CONFIG)

    # Expose customize.yaml over the HTTP config API; every successful write
    # triggers a core-config reload so the changes take effect immediately.
    view = CustomizeConfigView(
        'customize', 'config', CONFIG_PATH, cv.entity_id, dict,
        post_write_hook=reload_core_config)
    hass.http.register_view(view)
    return True
class CustomizeConfigView(EditKeyBasedConfigView):
    """Configure a list of customize entries, keyed by entity id."""

    def _get_value(self, hass, data, config_key):
        """Return the merged customization for one entity.

        'global' is what is currently active in hass (may include package
        customizations); 'local' is what customize.yaml itself contains.
        """
        customize = hass.data.get(DATA_CUSTOMIZE, {}).get(config_key) or {}
        return {'global': customize, 'local': data.get(config_key, {})}

    def _write_value(self, hass, data, config_key, new_value):
        """Persist a customization and apply it to the live state, if any."""
        data[config_key] = new_value

        state = hass.states.get(config_key)
        if state is None:
            # BUGFIX: the entity may not (yet) have a state — e.g. it is not
            # loaded or the id is new. The YAML data is already updated above;
            # skip patching the live state instead of raising AttributeError.
            return

        state_attributes = dict(state.attributes)
        state_attributes.update(new_value)
        hass.states.async_set(config_key, state.state, state_attributes)
| apache-2.0 |
hashimmm/iiifoo | source_mappings.py | 1 | 1933 | import inspect
from mapped_source_requests import mapping_interfaces
from authoring_requests import authoring_api_mixins, authoring_base
from authoring_requests.authoring_requests import (
VanillaBase, VanillaDeleteRequest, VanillaExportRequest,
StandardAuthoringAPIv1, StandardAuthoringAPIv1Base
)
from mapped_source_requests.mapping_interfaces import \
MappedSourceRequestBase
import source_type_plugins
from source_type_plugins import *
# DO NOT REMOVE THE ABOVE IMPORT, IT IS NOT UNUSED.
# Collect the public (non-dunder) attributes of every plugin module listed in
# source_type_plugins.__all__. The star-import above placed each module into
# this module's namespace, which is why locals()[item] resolves it.
plugins = []
for item in source_type_plugins.__all__:
    if item.startswith('__') and item.endswith('__'):
        continue
    plugins.append({k: v for k, v in locals()[item].__dict__.items()
                    if not (k.startswith('__') and k.endswith('__'))})

# Registry of request handler classes keyed by (source type name, request
# kind). Built-in entries are listed here; plugin entries are added by the
# registration loop below _req_type.
source_mapping = {
    ('test', 'base'): VanillaBase,
    ('test', 'export'): VanillaExportRequest,
    ('test', 'delete'): VanillaDeleteRequest,
    ('api_v0_1a', 'base'): StandardAuthoringAPIv1Base,
    ('api_v0_1a', 'export'): StandardAuthoringAPIv1,
}
def _req_type(obj):
    """Classify a plugin attribute as a request class.

    Returns 'view', 'delete', 'export' or 'base' for classes implementing the
    corresponding interface, and None for non-classes or unrelated classes.
    The checks are ordered: the more specific mixin interfaces win over the
    generic base classes.
    """
    if not inspect.isclass(obj):
        return None
    ordered_checks = (
        (authoring_api_mixins.ViewRequestInterface, 'view'),
        (authoring_api_mixins.DeleteRequestInterface, 'delete'),
        (authoring_api_mixins.ExportRequestInterface, 'export'),
        ((authoring_base.AuthoringRequestBase,
          mapping_interfaces.MappedSourceRequestBase), 'base'),
    )
    for interface, label in ordered_checks:
        if issubclass(obj, interface):
            return label
    return None
# Register every request class found in the plugin modules: any attribute that
# _req_type recognizes is added to source_mapping under the plugin's declared
# type_name and the detected request kind.
for objects in plugins:
    for obj in objects:  # obj iterates the attribute names (dict keys)
        is_request = _req_type(objects[obj])  # request kind string, or None
        if is_request:
            source_mapping[(objects[obj].type_name, is_request)] = objects[obj]
def is_dynamic_source(cls):
    """Return True if *cls* is a mapped ("dynamic") source request class."""
    return issubclass(cls, MappedSourceRequestBase)
def is_dynamic_source_object(obj):
    """Return True if *obj* is an instance of a mapped ("dynamic") source request."""
    # Idiom: isinstance(obj, C) is the standard (and __class__-aware)
    # spelling of issubclass(obj.__class__, C).
    return isinstance(obj, MappedSourceRequestBase)
| mit |
hectord/lettuce | tests/integration/lib/Django-1.2.5/django/db/models/sql/subqueries.py | 45 | 7688 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import Date
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND, Constraint
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'AggregateQuery']
class DeleteQuery(Query):
    """
    Delete queries are done through this class, since they are more constrained
    than general queries.
    """
    compiler = 'SQLDeleteCompiler'

    def do_query(self, table, where, using):
        # Point the query at a single table, attach the WHERE tree, and run
        # it via the delete compiler (no result rows expected).
        self.tables = [table]
        self.where = where
        compiler = self.get_compiler(using)
        compiler.execute_sql(None)

    def delete_batch(self, pk_list, using):
        """
        Set up and execute delete queries for all the objects in pk_list.

        More than one physical query may be executed if there are a
        lot of values in pk_list, chunked by GET_ITERATOR_CHUNK_SIZE.
        """
        pk_field = self.model._meta.pk
        table = self.model._meta.db_table
        for start in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            chunk = pk_list[start:start + GET_ITERATOR_CHUNK_SIZE]
            where = self.where_class()
            where.add(
                (Constraint(None, pk_field.column, pk_field), 'in', chunk),
                AND)
            self.do_query(table, where, using=using)
class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """
    compiler = 'SQLUpdateCompiler'

    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()

    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        # (field, model, value) triples to SET, filled by add_update_fields().
        self.values = []
        # Primary keys used to restrict related (ancestor-model) updates.
        self.related_ids = None
        # hasattr check: clone() pre-populates related_updates before this
        # runs, and that copy must not be clobbered.
        if not hasattr(self, 'related_updates'):
            self.related_updates = {}

    def clone(self, klass=None, **kwargs):
        # Shallow-copy related_updates so the clone does not share the dict.
        return super(UpdateQuery, self).clone(klass,
                related_updates=self.related_updates.copy(), **kwargs)

    def clear_related(self, related_field, pk_list, using):
        """
        Set up and execute an update query that clears related entries for the
        keys in pk_list.

        This is used by the QuerySet.delete_objects() method.
        """
        # Chunked like DeleteQuery.delete_batch to bound the IN-list size.
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            f = self.model._meta.pk
            self.where.add((Constraint(None, f.column, f), 'in',
                    pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
                    AND)
            # SET related_field = NULL for the matched rows.
            self.values = [(related_field, None, None)]
            self.get_compiler(using).execute_sql(None)

    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in values.iteritems():
            field, model, direct, m2m = self.model._meta.get_field_by_name(name)
            if not direct or m2m:
                raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
            if model:
                # Field lives on an ancestor model: route it to a separate
                # per-ancestor update (see add_related_update()).
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)

    def add_update_fields(self, values_seq):
        """
        Turn a sequence of (field, model, value) triples into an update query.
        Used by add_update_values() as well as the "fast" update path when
        saving models.
        """
        self.values.extend(values_seq)

    def add_related_update(self, model, field, value):
        """
        Adds (name, value) to an update query for an ancestor model.

        Updates are coalesced so that we only run one update query per ancestor.
        """
        # EAFP: append to the ancestor's list, creating it on first use.
        try:
            self.related_updates[model].append((field, None, value))
        except KeyError:
            self.related_updates[model] = [(field, None, value)]

    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in self.related_updates.iteritems():
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    compiler = 'SQLInsertCompiler'

    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        # Columns to insert into, their value placeholders, and SQL params.
        self.columns = []
        self.values = []
        self.params = ()

    def clone(self, klass=None, **kwargs):
        # Copy the insert-specific state alongside the base query state.
        insert_state = {
            'columns': self.columns[:],
            'values': self.values[:],
            'params': self.params
        }
        insert_state.update(kwargs)
        return super(InsertQuery, self).clone(klass, **insert_state)

    def insert_values(self, insert_values, raw_values=False):
        """
        Set up the insert query from 'insert_values', a sequence of
        (field, value) pairs giving the target fields and their values.

        If 'raw_values' is True, the values are inserted directly into the
        query, rather than passed as SQL parameters. This provides a way to
        insert NULL and DEFAULT keywords into the query, for example.
        """
        pairs = list(insert_values)
        self.columns.extend(field.column for field, _ in pairs)
        if raw_values:
            # (None, value): value is spliced verbatim by the compiler.
            self.values.extend((None, val) for _, val in pairs)
        else:
            # (field, value): compiler emits a placeholder per pair and the
            # actual values travel via self.params.
            self.params += tuple(val for _, val in pairs)
            self.values.extend(pairs)
class DateQuery(Query):
    """
    A DateQuery is a normal query, except that it specifically selects a single
    date field. This requires some special handling when converting the results
    back to Python objects, so we put it in a separate class.
    """
    compiler = 'SQLDateCompiler'

    def add_date_select(self, field, lookup_type, order='ASC'):
        """
        Converts the query into a date extraction query.
        """
        join_info = self.setup_joins([field.name], self.get_meta(),
                                     self.get_initial_alias(), False)
        final_alias = join_info[3][-1]
        # Replace the select list with the single extracted date expression.
        self.select = [Date((final_alias, field.column), lookup_type)]
        self.select_fields = [None]
        self.select_related = False  # See #7097.
        self.set_extra_mask([])
        self.distinct = True
        # Order by the first (only) output column, ascending or descending.
        if order == 'ASC':
            self.order_by = [1]
        else:
            self.order_by = [-1]
class AggregateQuery(Query):
    """
    An AggregateQuery takes another query as a parameter to the FROM
    clause and only selects the elements in the provided list.
    """
    compiler = 'SQLAggregateCompiler'

    def add_subquery(self, query, using):
        # Pre-render the inner query; its SQL text and parameters are later
        # spliced into the FROM clause by SQLAggregateCompiler.
        inner_compiler = query.get_compiler(using)
        self.subquery, self.sub_params = inner_compiler.as_sql(
            with_col_aliases=True)
| gpl-3.0 |
userdw/RaspberryPi_3_Starter_Kit | 05_Ambient_Light_Monitoring/Ambient_Light_Monitoring/MCP3202.py | 4 | 1394 | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
import datetime
import os
from time import strftime
# BCM pin assignments. CS/CLK/MOSI/MISO drive the bit-banged SPI bus used by
# readADC(); CS2 and LDAC are used by setDAC() — presumably the DAC's chip
# select and latch lines (TODO confirm against the board wiring).
CS = 4
CS2 = 7
CLK = 11
MOSI = 10
MISO = 9
LDAC = 8
# Configure the pins: outputs idle with CS high (deselected), CLK low,
# MOSI high; MISO is an input with the internal pull-up enabled.
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(CS, GPIO.OUT)
GPIO.setup(CLK, GPIO.OUT)
GPIO.setup(MOSI, GPIO.OUT)
GPIO.setup(CS2, GPIO.OUT)
GPIO.setup(LDAC, GPIO.OUT)
GPIO.setup(MISO, GPIO.IN,pull_up_down = GPIO.PUD_UP)
GPIO.output(CS, True)
GPIO.output(CLK, False)
GPIO.output(MOSI, True)
def myspi(rdata):
    """Bit-bang one SPI byte: shift *rdata* out on MOSI, MSB first, while
    sampling MISO on each clock-high phase. Returns the byte read back."""
    received = 0
    for bit in range(7, -1, -1):
        mask = 1 << bit
        # Present the next outgoing bit before raising the clock.
        if rdata & mask:
            GPIO.output(MOSI, True)
        else:
            GPIO.output(MOSI, False)
        GPIO.output(CLK, True)
        # Sample the slave's output while the clock is high.
        if GPIO.input(MISO) == 1:
            received += mask
        GPIO.output(CLK, False)
    return received
def readADC(ch):
    """Read one 12-bit sample from ADC channel *ch* (0 or 1).

    Returns the raw 0..4095 count as a float (round(float(v), 2) keeps the
    historical return type; the voltage conversion is left commented out).
    """
    # Second command byte: single-ended channel select; unknown channels
    # fall through to 0, matching the original behaviour.
    if ch == 0:
        cmd = 0x80
    elif ch == 1:
        cmd = 0xc0
    else:
        cmd = 0
    GPIO.output(CS, False)
    myspi(0x01)        # start bit (reply byte discarded)
    high = myspi(cmd)  # channel select; low nibble holds result bits 11..8
    low = myspi(0x00)  # clock out the remaining 8 result bits
    raw = ((high & 0x0f) << 8) + low
    GPIO.output(CS, True)
    return round(float(raw), 2)
def setDAC(data, channel):
    """Write an 8-bit value (0..255) to the DAC, scaled to its 12-bit range.

    channel 1 selects command nibble 0xF0, any other channel 0x70 —
    presumably DAC channel B vs A plus gain/latch flags (TODO confirm
    against the DAC datasheet).
    """
    if channel == 1:
        cmd = 0xF0
    else:
        cmd = 0x70
    # Pull latch and chip-select low before clocking the command out.
    GPIO.output(LDAC, False)
    GPIO.output(CS2, False)
    # Scale 0..255 -> 0..4095. Expression kept verbatim: under Python 2 the
    # division is integral before the float/int round-trip.
    scaled = int(float(data * 4095 / 255))
    myspi((scaled >> 8) + cmd)  # command nibble + high 4 data bits
    myspi(scaled & 0xFF)        # low 8 data bits
    GPIO.output(CS2, True)
marctc/django-extensions | django_extensions/management/commands/pipchecker.py | 27 | 11745 | import json
import os
from distutils.version import LooseVersion
from optparse import make_option
import pip
from django.core.management.base import NoArgsCommand, CommandError
from pip.req import parse_requirements
from django_extensions.management.color import color_style
from django_extensions.management.utils import signalcommand
try:
from urllib.parse import urlparse
from urllib.error import HTTPError
from urllib.request import Request, urlopen
from xmlrpc.client import ServerProxy
except ImportError:
# Python 2
from urlparse import urlparse
from urllib2 import HTTPError, Request, urlopen
from xmlrpclib import ServerProxy
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
class Command(NoArgsCommand):
    # Command-line options: a GitHub token (raises the API rate limit), one or
    # more requirements files, and a flag to also report newer-than-pypi
    # installs.
    option_list = NoArgsCommand.option_list + (
        make_option(
            "-t", "--github-api-token", action="store", dest="github_api_token",
            help="A github api authentication token."
        ),
        make_option(
            "-r", "--requirement", action="append", dest="requirements",
            default=[], metavar="FILENAME",
            help="Check all the packages listed in the given requirements file. "
                 "This option can be used multiple times."
        ),
        make_option(
            "-n", "--newer", action="store_true", dest="show_newer",
            help="Also show when newer version then available is installed."
        ),
    )
    help = "Scan pip requirement files for out-of-date packages."

    @signalcommand
    def handle_noargs(self, **options):
        """Entry point: locate requirements files, parse them into self.reqs,
        then check pypi-, github- and otherwise-pinned packages in turn."""
        self.style = color_style()
        self.options = options
        # Requirements discovery: explicit -r flags, else ./requirements.txt,
        # else every *.txt file directly inside ./requirements/.
        if options["requirements"]:
            req_files = options["requirements"]
        elif os.path.exists("requirements.txt"):
            req_files = ["requirements.txt"]
        elif os.path.exists("requirements"):
            req_files = ["requirements/{0}".format(f) for f in os.listdir("requirements")
                         if os.path.isfile(os.path.join("requirements", f)) and
                         f.lower().endswith(".txt")]
        else:
            raise CommandError("Requirements not found")
        # PipSession only exists in pip >= 6; used as a feature check.
        try:
            from pip.download import PipSession
        except ImportError:
            raise CommandError("Pip version 6 or higher is required")
        # self.reqs maps package name -> {"pip_req": parsed requirement,
        # "url": VCS/archive link or None}; entries are deleted as they are
        # reported, so whatever remains at the end is "unsupported".
        self.reqs = {}
        with PipSession() as session:
            for filename in req_files:
                for req in parse_requirements(filename, session=session):
                    # url attribute changed to link in pip version 6.1.0 and above
                    if LooseVersion(pip.__version__) > LooseVersion('6.0.8'):
                        self.reqs[req.name] = {
                            "pip_req": req,
                            "url": req.link,
                        }
                    else:
                        self.reqs[req.name] = {
                            "pip_req": req,
                            "url": req.url,
                        }
        # Token lookup order: CLI flag, then environment, then anonymous.
        if options["github_api_token"]:
            self.github_api_token = options["github_api_token"]
        elif os.environ.get("GITHUB_API_TOKEN"):
            self.github_api_token = os.environ.get("GITHUB_API_TOKEN")
        else:
            self.github_api_token = None  # only 50 requests per hour
        self.check_pypi()
        if HAS_REQUESTS:
            self.check_github()
        else:
            print(self.style.ERROR("Cannot check github urls. The requests library is not installed. ( pip install requests )"))
        self.check_other()

    def _urlopen_as_json(self, url, headers=None):
        """Shorcut for return contents as json"""
        req = Request(url, headers=headers)
        return json.loads(urlopen(req).read())

    def check_pypi(self):
        """
        If the requirement is frozen to pypi, check for a new version.

        Reported entries are removed from self.reqs; url-pinned entries are
        left for check_github()/check_other().
        """
        # Attach the locally installed distribution (if any) to each entry.
        for dist in pip.get_installed_distributions():
            name = dist.project_name
            if name in self.reqs.keys():
                self.reqs[name]["dist"] = dist
        pypi = ServerProxy("https://pypi.python.org/pypi")
        # Iterate over a copy: entries are deleted during the loop.
        for name, req in list(self.reqs.items()):
            if req["url"]:
                continue  # skipping github packages.
            elif "dist" in req:
                dist = req["dist"]
                dist_version = LooseVersion(dist.version)
                available = pypi.package_releases(req["pip_req"].name)
                try:
                    available_version = LooseVersion(available[0])
                except IndexError:
                    # Empty release list from pypi for this name.
                    available_version = None
                if not available_version:
                    msg = self.style.WARN("release is not on pypi (check capitalization and/or --extra-index-url)")
                elif self.options['show_newer'] and dist_version > available_version:
                    msg = self.style.INFO("{0} available (newer installed)".format(available_version))
                elif available_version > dist_version:
                    msg = self.style.INFO("{0} available".format(available_version))
                else:
                    # Up to date: report nothing and drop the entry silently.
                    msg = "up to date"
                    del self.reqs[name]
                    continue
                pkg_info = self.style.BOLD("{dist.project_name} {dist.version}".format(dist=dist))
            else:
                msg = "not installed"
                pkg_info = name
            print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
            del self.reqs[name]

    def check_github(self):
        """
        If the requirement is frozen to a github url, check for new commits.

        API Tokens
        ----------
        For more than 50 github api calls per hour, pipchecker requires
        authentication with the github api by settings the environemnt
        variable ``GITHUB_API_TOKEN`` or setting the command flag
        --github-api-token='mytoken'``.

        To create a github api token for use at the command line::
            curl -u 'rizumu' -d '{"scopes":["repo"], "note":"pipchecker"}' https://api.github.com/authorizations

        For more info on github api tokens:
            https://help.github.com/articles/creating-an-oauth-token-for-command-line-use
            http://developer.github.com/v3/oauth/#oauth-authorizations-api

        Requirement Format
        ------------------
        Pipchecker gets the sha of frozen repo and checks if it is
        found at the head of any branches. If it is not found then
        the requirement is considered to be out of date.

        Therefore, freezing at the commit hash will provide the expected
        results, but if freezing at a branch or tag name, pipchecker will
        not be able to determine with certainty if the repo is out of date.

        Freeze at the commit hash (sha)::
            git+git://github.com/django/django.git@393c268e725f5b229ecb554f3fac02cfc250d2df#egg=Django

        Freeze with a branch name::
            git+git://github.com/django/django.git@master#egg=Django

        Freeze with a tag::
            git+git://github.com/django/django.git@1.5b2#egg=Django

        Do not freeze::
            git+git://github.com/django/django.git#egg=Django
        """
        for name, req in list(self.reqs.items()):
            req_url = req["url"]
            if not req_url:
                continue
            req_url = str(req_url)
            # Non-github VCS urls and plain archives are handled elsewhere.
            if req_url.startswith("git") and "github.com/" not in req_url:
                continue
            if req_url.endswith(".tar.gz") or req_url.endswith(".tar.bz2") or req_url.endswith(".zip"):
                continue
            headers = {
                "content-type": "application/json",
            }
            if self.github_api_token:
                headers["Authorization"] = "token {0}".format(self.github_api_token)
            # Expect a path of the form /<user>/<repo...>; anything else is
            # reported and skipped.
            try:
                user, repo = urlparse(req_url).path.split("#")[0].strip("/").rstrip("/").split("/")
            except (ValueError, IndexError) as e:
                print(self.style.ERROR("\nFailed to parse %r: %s\n" % (req_url, e)))
                continue
            # Probe request to detect bad credentials / rate limiting before
            # doing per-package calls.
            try:
                #test_auth = self._urlopen_as_json("https://api.github.com/django/", headers=headers)
                test_auth = requests.get("https://api.github.com/django/", headers=headers).json()
            except HTTPError as e:
                print("\n%s\n" % str(e))
                return
            if "message" in test_auth and test_auth["message"] == "Bad credentials":
                print(self.style.ERROR("\nGithub API: Bad credentials. Aborting!\n"))
                return
            elif "message" in test_auth and test_auth["message"].startswith("API Rate Limit Exceeded"):
                print(self.style.ERROR("\nGithub API: Rate Limit Exceeded. Aborting!\n"))
                return
            # Extract the pinned revision ("@<sha>") from the repo fragment,
            # handling both "<repo>.git@<sha>" and "<repo>@<sha>" forms.
            frozen_commit_sha = None
            if ".git" in repo:
                repo_name, frozen_commit_full = repo.split(".git")
                if frozen_commit_full.startswith("@"):
                    frozen_commit_sha = frozen_commit_full[1:]
            elif "@" in repo:
                repo_name, frozen_commit_sha = repo.split("@")
            if frozen_commit_sha is None:
                msg = self.style.ERROR("repo is not frozen")
            if frozen_commit_sha:
                # A pin is up to date iff its sha is the head of some branch.
                branch_url = "https://api.github.com/repos/{0}/{1}/branches".format(user, repo_name)
                #branch_data = self._urlopen_as_json(branch_url, headers=headers)
                branch_data = requests.get(branch_url, headers=headers).json()
                frozen_commit_url = "https://api.github.com/repos/{0}/{1}/commits/{2}".format(
                    user, repo_name, frozen_commit_sha
                )
                #frozen_commit_data = self._urlopen_as_json(frozen_commit_url, headers=headers)
                frozen_commit_data = requests.get(frozen_commit_url, headers=headers).json()
                if "message" in frozen_commit_data and frozen_commit_data["message"] == "Not Found":
                    msg = self.style.ERROR("{0} not found in {1}. Repo may be private.".format(frozen_commit_sha[:10], name))
                elif frozen_commit_sha in [branch["commit"]["sha"] for branch in branch_data]:
                    msg = self.style.BOLD("up to date")
                else:
                    msg = self.style.INFO("{0} is not the head of any branch".format(frozen_commit_data["sha"][:10]))
            if "dist" in req:
                pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
            elif frozen_commit_sha is None:
                pkg_info = name
            else:
                pkg_info = "{0} {1}".format(name, frozen_commit_sha[:10])
            print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
            del self.reqs[name]

    def check_other(self):
        """
        If the requirement is frozen somewhere other than pypi or github, skip.

        If you have a private pypi or use --extra-index-url, consider contributing
        support here.
        """
        # Whatever the earlier checks did not delete is unsupported.
        if self.reqs:
            print(self.style.ERROR("\nOnly pypi and github based requirements are supported:"))
            for name, req in self.reqs.items():
                if "dist" in req:
                    pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
                elif "url" in req:
                    pkg_info = "{url}".format(url=req["url"])
                else:
                    pkg_info = "unknown package"
                print(self.style.BOLD("{pkg_info:40} is not a pypi or github requirement".format(pkg_info=pkg_info)))
| mit |
aselle/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/batch_reshape_test.py | 14 | 21821 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import batch_reshape as batch_reshape_lib
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_lib
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.contrib.distributions.python.ops import wishart as wishart_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class _BatchReshapeTest(object):
  def make_wishart(self, dims, new_batch_shape, old_batch_shape):
    """Return (WishartFull, BatchReshape-of-it) with the given batch shapes.

    In the static-shape variant the target batch shape is a constant; in the
    dynamic variant it goes through a shapeless placeholder so shape
    inference cannot see it.
    """
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    # Two 2x2 scale matrices, duplicated and reshaped to
    # old_batch_shape + [dims, dims].
    scale = self.dtype([
        [[1., 0.5],
         [0.5, 1.]],
        [[0.5, 0.25],
         [0.25, 0.75]],
    ])
    scale = np.reshape(np.concatenate([scale, scale], axis=0),
                       old_batch_shape + [dims, dims])
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    wishart = wishart_lib.WishartFull(df=5, scale=scale_ph)
    reshape_wishart = batch_reshape_lib.BatchReshape(
        distribution=wishart,
        batch_shape=new_batch_shape_ph,
        validate_args=True)
    return wishart, reshape_wishart
  def test_matrix_variate_sample_and_log_prob(self):
    """sample/log_prob of a reshaped Wishart match a reshape of the original.

    Uses the same seed for both distributions so the draws are identical up
    to the batch reshape.
    """
    dims = 2
    new_batch_shape = [4]
    old_batch_shape = [2, 2]
    wishart, reshape_wishart = self.make_wishart(
        dims, new_batch_shape, old_batch_shape)
    batch_shape = reshape_wishart.batch_shape_tensor()
    event_shape = reshape_wishart.event_shape_tensor()
    expected_sample_shape = [3, 1] + new_batch_shape + [dims, dims]
    x = wishart.sample([3, 1], seed=42)
    expected_sample = array_ops.reshape(x, expected_sample_shape)
    actual_sample = reshape_wishart.sample([3, 1], seed=42)
    expected_log_prob_shape = [3, 1] + new_batch_shape
    expected_log_prob = array_ops.reshape(
        wishart.log_prob(x), expected_log_prob_shape)
    actual_log_prob = reshape_wishart.log_prob(expected_sample)
    with self.test_session() as sess:
      [
          batch_shape_,
          event_shape_,
          expected_sample_, actual_sample_,
          expected_log_prob_, actual_log_prob_,
      ] = sess.run([
          batch_shape,
          event_shape,
          expected_sample, actual_sample,
          expected_log_prob, actual_log_prob,
      ])
      self.assertAllEqual(new_batch_shape, batch_shape_)
      self.assertAllEqual([dims, dims], event_shape_)
      self.assertAllClose(expected_sample_, actual_sample_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_log_prob_, actual_log_prob_,
                          atol=0., rtol=1e-6)
    # Static-shape checks only make sense when shapes are known at graph
    # construction time.
    if not self.is_static_shape:
      return
    self.assertAllEqual(new_batch_shape, reshape_wishart.batch_shape)
    self.assertAllEqual([dims, dims], reshape_wishart.event_shape)
    self.assertAllEqual(expected_sample_shape, actual_sample.shape)
    self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
  def test_matrix_variate_stats(self):
    """Statistics of a reshaped Wishart equal a reshape of the originals.

    Scalar stats (entropy) get the new batch shape; matrix stats (mean, mode,
    stddev, variance) get new_batch_shape + [dims, dims].
    """
    dims = 2
    new_batch_shape = [4]
    old_batch_shape = [2, 2]
    wishart, reshape_wishart = self.make_wishart(
        dims, new_batch_shape, old_batch_shape)
    expected_scalar_stat_shape = new_batch_shape
    expected_matrix_stat_shape = new_batch_shape + [dims, dims]
    expected_entropy = array_ops.reshape(
        wishart.entropy(), expected_scalar_stat_shape)
    actual_entropy = reshape_wishart.entropy()
    expected_mean = array_ops.reshape(
        wishart.mean(), expected_matrix_stat_shape)
    actual_mean = reshape_wishart.mean()
    expected_mode = array_ops.reshape(
        wishart.mode(), expected_matrix_stat_shape)
    actual_mode = reshape_wishart.mode()
    expected_stddev = array_ops.reshape(
        wishart.stddev(), expected_matrix_stat_shape)
    actual_stddev = reshape_wishart.stddev()
    expected_variance = array_ops.reshape(
        wishart.variance(), expected_matrix_stat_shape)
    actual_variance = reshape_wishart.variance()
    with self.test_session() as sess:
      [
          expected_entropy_, actual_entropy_,
          expected_mean_, actual_mean_,
          expected_mode_, actual_mode_,
          expected_stddev_, actual_stddev_,
          expected_variance_, actual_variance_,
      ] = sess.run([
          expected_entropy, actual_entropy,
          expected_mean, actual_mean,
          expected_mode, actual_mode,
          expected_stddev, actual_stddev,
          expected_variance, actual_variance,
      ])
      self.assertAllClose(expected_entropy_, actual_entropy_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_mean_, actual_mean_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_mode_, actual_mode_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_stddev_, actual_stddev_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_variance_, actual_variance_,
                          atol=0., rtol=1e-6)
    # Static-shape-only assertions below.
    if not self.is_static_shape:
      return
    self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_mean.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_mode.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_stddev.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_variance.shape)
  def make_normal(self, new_batch_shape, old_batch_shape):
    """Return (Normal, BatchReshape-of-it); scalar-event test fixture."""
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    # Distinct positive scales (0.5, 1.5, ...) shaped to old_batch_shape.
    scale = self.dtype(0.5 + np.arange(
        np.prod(old_batch_shape)).reshape(old_batch_shape))
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    normal = normal_lib.Normal(loc=self.dtype(0), scale=scale_ph)
    reshape_normal = batch_reshape_lib.BatchReshape(
        distribution=normal,
        batch_shape=new_batch_shape_ph,
        validate_args=True)
    return normal, reshape_normal
  def test_scalar_variate_sample_and_log_prob(self):
    """sample/log_prob of a reshaped Normal match a reshape of the original
    (same seed on both sides; scalar event, so event_shape is [])."""
    new_batch_shape = [2, 2]
    old_batch_shape = [4]
    normal, reshape_normal = self.make_normal(
        new_batch_shape, old_batch_shape)
    batch_shape = reshape_normal.batch_shape_tensor()
    event_shape = reshape_normal.event_shape_tensor()
    expected_sample_shape = new_batch_shape
    x = normal.sample(seed=52)
    expected_sample = array_ops.reshape(x, expected_sample_shape)
    actual_sample = reshape_normal.sample(seed=52)
    expected_log_prob_shape = new_batch_shape
    expected_log_prob = array_ops.reshape(
        normal.log_prob(x), expected_log_prob_shape)
    actual_log_prob = reshape_normal.log_prob(expected_sample)
    with self.test_session() as sess:
      [
          batch_shape_,
          event_shape_,
          expected_sample_, actual_sample_,
          expected_log_prob_, actual_log_prob_,
      ] = sess.run([
          batch_shape,
          event_shape,
          expected_sample, actual_sample,
          expected_log_prob, actual_log_prob,
      ])
      self.assertAllEqual(new_batch_shape, batch_shape_)
      self.assertAllEqual([], event_shape_)
      self.assertAllClose(expected_sample_, actual_sample_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_log_prob_, actual_log_prob_,
                          atol=0., rtol=1e-6)
    # Static-shape-only assertions below.
    if not self.is_static_shape:
      return
    self.assertAllEqual(new_batch_shape, reshape_normal.batch_shape)
    self.assertAllEqual([], reshape_normal.event_shape)
    self.assertAllEqual(expected_sample_shape, actual_sample.shape)
    self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
  def test_scalar_variate_stats(self):
    """All scalar-event statistics reshape to the new batch shape."""
    new_batch_shape = [2, 2]
    old_batch_shape = [4]
    normal, reshape_normal = self.make_normal(new_batch_shape, old_batch_shape)
    expected_scalar_stat_shape = new_batch_shape
    expected_entropy = array_ops.reshape(
        normal.entropy(), expected_scalar_stat_shape)
    actual_entropy = reshape_normal.entropy()
    expected_mean = array_ops.reshape(
        normal.mean(), expected_scalar_stat_shape)
    actual_mean = reshape_normal.mean()
    expected_mode = array_ops.reshape(
        normal.mode(), expected_scalar_stat_shape)
    actual_mode = reshape_normal.mode()
    expected_stddev = array_ops.reshape(
        normal.stddev(), expected_scalar_stat_shape)
    actual_stddev = reshape_normal.stddev()
    expected_variance = array_ops.reshape(
        normal.variance(), expected_scalar_stat_shape)
    actual_variance = reshape_normal.variance()
    with self.test_session() as sess:
      [
          expected_entropy_, actual_entropy_,
          expected_mean_, actual_mean_,
          expected_mode_, actual_mode_,
          expected_stddev_, actual_stddev_,
          expected_variance_, actual_variance_,
      ] = sess.run([
          expected_entropy, actual_entropy,
          expected_mean, actual_mean,
          expected_mode, actual_mode,
          expected_stddev, actual_stddev,
          expected_variance, actual_variance,
      ])
      self.assertAllClose(expected_entropy_, actual_entropy_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_mean_, actual_mean_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_mode_, actual_mode_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_stddev_, actual_stddev_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_variance_, actual_variance_,
                          atol=0., rtol=1e-6)
    # Static-shape-only assertions below.
    if not self.is_static_shape:
      return
    self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
    self.assertAllEqual(expected_scalar_stat_shape, actual_mean.shape)
    self.assertAllEqual(expected_scalar_stat_shape, actual_mode.shape)
    self.assertAllEqual(expected_scalar_stat_shape, actual_stddev.shape)
    self.assertAllEqual(expected_scalar_stat_shape, actual_variance.shape)
  def make_mvn(self, dims, new_batch_shape, old_batch_shape):
    """Return (MultivariateNormalDiag, BatchReshape-of-it); vector-event
    fixture with unit diagonal scale."""
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
    reshape_mvn = batch_reshape_lib.BatchReshape(
        distribution=mvn,
        batch_shape=new_batch_shape_ph,
        validate_args=True)
    return mvn, reshape_mvn
  def test_vector_variate_sample_and_log_prob(self):
    """sample/log_prob of a reshaped MVN match a reshape of the original
    (same seed on both sides; event_shape is [dims])."""
    dims = 3
    new_batch_shape = [2, 1]
    old_batch_shape = [2]
    mvn, reshape_mvn = self.make_mvn(
        dims, new_batch_shape, old_batch_shape)
    batch_shape = reshape_mvn.batch_shape_tensor()
    event_shape = reshape_mvn.event_shape_tensor()
    expected_sample_shape = [3] + new_batch_shape + [dims]
    x = mvn.sample(3, seed=62)
    expected_sample = array_ops.reshape(x, expected_sample_shape)
    actual_sample = reshape_mvn.sample(3, seed=62)
    expected_log_prob_shape = [3] + new_batch_shape
    expected_log_prob = array_ops.reshape(
        mvn.log_prob(x), expected_log_prob_shape)
    actual_log_prob = reshape_mvn.log_prob(expected_sample)
    with self.test_session() as sess:
      [
          batch_shape_,
          event_shape_,
          expected_sample_, actual_sample_,
          expected_log_prob_, actual_log_prob_,
      ] = sess.run([
          batch_shape,
          event_shape,
          expected_sample, actual_sample,
          expected_log_prob, actual_log_prob,
      ])
      self.assertAllEqual(new_batch_shape, batch_shape_)
      self.assertAllEqual([dims], event_shape_)
      self.assertAllClose(expected_sample_, actual_sample_,
                          atol=0., rtol=1e-6)
      self.assertAllClose(expected_log_prob_, actual_log_prob_,
                          atol=0., rtol=1e-6)
    # Static-shape-only assertions below.
    if not self.is_static_shape:
      return
    self.assertAllEqual(new_batch_shape, reshape_mvn.batch_shape)
    self.assertAllEqual([dims], reshape_mvn.event_shape)
    self.assertAllEqual(expected_sample_shape, actual_sample.shape)
    self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_vector_variate_stats(self):
    """Statistics of the reshaped MVN equal reshaped statistics."""
    dims = 3
    new_batch_shape = [2, 1]
    old_batch_shape = [2]
    mvn, reshape_mvn = self.make_mvn(
        dims, new_batch_shape, old_batch_shape)
    # Scalar statistics keep only the batch shape.
    expected_scalar_stat_shape = new_batch_shape
    expected_entropy = array_ops.reshape(
        mvn.entropy(), expected_scalar_stat_shape)
    actual_entropy = reshape_mvn.entropy()
    # Vector statistics append the event dimension once.
    expected_vector_stat_shape = new_batch_shape + [dims]
    expected_mean = array_ops.reshape(
        mvn.mean(), expected_vector_stat_shape)
    actual_mean = reshape_mvn.mean()
    expected_mode = array_ops.reshape(
        mvn.mode(), expected_vector_stat_shape)
    actual_mode = reshape_mvn.mode()
    expected_stddev = array_ops.reshape(
        mvn.stddev(), expected_vector_stat_shape)
    actual_stddev = reshape_mvn.stddev()
    expected_variance = array_ops.reshape(
        mvn.variance(), expected_vector_stat_shape)
    actual_variance = reshape_mvn.variance()
    # Matrix statistics (covariance) append the event dimension twice.
    expected_matrix_stat_shape = new_batch_shape + [dims, dims]
    expected_covariance = array_ops.reshape(
        mvn.covariance(), expected_matrix_stat_shape)
    actual_covariance = reshape_mvn.covariance()
    with self.test_session() as sess:
        [
            expected_entropy_, actual_entropy_,
            expected_mean_, actual_mean_,
            expected_mode_, actual_mode_,
            expected_stddev_, actual_stddev_,
            expected_variance_, actual_variance_,
            expected_covariance_, actual_covariance_,
        ] = sess.run([
            expected_entropy, actual_entropy,
            expected_mean, actual_mean,
            expected_mode, actual_mode,
            expected_stddev, actual_stddev,
            expected_variance, actual_variance,
            expected_covariance, actual_covariance,
        ])
        self.assertAllClose(expected_entropy_, actual_entropy_,
                            atol=0., rtol=1e-6)
        self.assertAllClose(expected_mean_, actual_mean_,
                            atol=0., rtol=1e-6)
        self.assertAllClose(expected_mode_, actual_mode_,
                            atol=0., rtol=1e-6)
        self.assertAllClose(expected_stddev_, actual_stddev_,
                            atol=0., rtol=1e-6)
        self.assertAllClose(expected_variance_, actual_variance_,
                            atol=0., rtol=1e-6)
        self.assertAllClose(expected_covariance_, actual_covariance_,
                            atol=0., rtol=1e-6)
    if not self.is_static_shape:
        return
    # Static-shape mode: shapes must also be known at graph-build time.
    self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
    self.assertAllEqual(expected_vector_stat_shape, actual_mean.shape)
    self.assertAllEqual(expected_vector_stat_shape, actual_mode.shape)
    self.assertAllEqual(expected_vector_stat_shape, actual_stddev.shape)
    self.assertAllEqual(expected_vector_stat_shape, actual_variance.shape)
    self.assertAllEqual(expected_matrix_stat_shape, actual_covariance.shape)
def test_bad_reshape_size(self):
    """Reshaping to a batch of a different total size must be rejected."""
    dims = 2
    new_batch_shape = [2, 3]
    old_batch_shape = [2]  # 2 != 2*3
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
    if self.is_static_shape:
        # Static shapes fail eagerly, at construction time.
        with self.assertRaisesRegexp(
            ValueError, (r"`batch_shape` size \(6\) must match "
                         r"`distribution\.batch_shape` size \(2\)")):
            batch_reshape_lib.BatchReshape(
                distribution=mvn,
                batch_shape=new_batch_shape_ph,
                validate_args=True)
    else:
        # Dynamic shapes only fail when the validation op actually runs.
        with self.test_session():
            with self.assertRaisesOpError(r"Shape sizes do not match."):
                batch_reshape_lib.BatchReshape(
                    distribution=mvn,
                    batch_shape=new_batch_shape_ph,
                    validate_args=True).sample().eval()
def test_non_positive_shape(self):
    """Non-positive entries in the new batch shape must be rejected."""
    dims = 2
    old_batch_shape = [4]
    if self.is_static_shape:
        # Unknown first dimension does not trigger size check. Note that
        # any dimension < 0 is treated statically as unknown.
        new_batch_shape = [-1, 0]
    else:
        new_batch_shape = [-2, -2]  # -2 * -2 = 4, same size as the old shape.
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
    if self.is_static_shape:
        with self.assertRaisesRegexp(ValueError, r".*must be >=-1.*"):
            batch_reshape_lib.BatchReshape(
                distribution=mvn,
                batch_shape=new_batch_shape_ph,
                validate_args=True)
    else:
        # Dynamic shapes only fail when the validation op actually runs.
        with self.test_session():
            with self.assertRaisesOpError(r".*must be >=-1.*"):
                batch_reshape_lib.BatchReshape(
                    distribution=mvn,
                    batch_shape=new_batch_shape_ph,
                    validate_args=True).sample().eval()
def test_non_vector_shape(self):
    """A scalar (rank-0) batch shape must be rejected."""
    dims = 2
    new_batch_shape = 2  # scalar, not a vector
    old_batch_shape = [2]
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    scale = np.ones(old_batch_shape + [dims], self.dtype)
    scale_ph = array_ops.placeholder_with_default(
        scale, shape=scale.shape if self.is_static_shape else None)
    mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
    if self.is_static_shape:
        with self.assertRaisesRegexp(ValueError, r".*must be a vector.*"):
            batch_reshape_lib.BatchReshape(
                distribution=mvn,
                batch_shape=new_batch_shape_ph,
                validate_args=True)
    else:
        # Dynamic shapes only fail when the validation op actually runs.
        with self.test_session():
            with self.assertRaisesOpError(r".*must be a vector.*"):
                batch_reshape_lib.BatchReshape(
                    distribution=mvn,
                    batch_shape=new_batch_shape_ph,
                    validate_args=True).sample().eval()
def test_broadcasting_explicitly_unsupported(self):
    """log_prob must reject inputs that would require broadcasting."""
    old_batch_shape = [4]
    new_batch_shape = [1, 4, 1]
    rate_ = self.dtype([1, 10, 2, 20])
    rate = array_ops.placeholder_with_default(
        rate_,
        shape=old_batch_shape if self.is_static_shape else None)
    poisson_4 = poisson_lib.Poisson(rate)
    new_batch_shape_ph = (
        constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
        else array_ops.placeholder_with_default(
            np.int32(new_batch_shape), shape=None))
    poisson_141_reshaped = batch_reshape_lib.BatchReshape(
        poisson_4, new_batch_shape_ph, validate_args=True)
    # x_4 has too few dims for the [1, 4, 1] batch; x_114 has the right
    # rank but a shape that does not match without broadcasting.
    x_4 = self.dtype([2, 12, 3, 23])
    x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)
    if self.is_static_shape:
        with self.assertRaisesRegexp(NotImplementedError,
                                     "too few batch and event dims"):
            poisson_141_reshaped.log_prob(x_4)
        with self.assertRaisesRegexp(NotImplementedError,
                                     "unexpected batch and event shape"):
            poisson_141_reshaped.log_prob(x_114)
        return
    # Dynamic shapes only fail when the validation op actually runs.
    with self.assertRaisesOpError("too few batch and event dims"):
        with self.test_session():
            poisson_141_reshaped.log_prob(x_4).eval()
    with self.assertRaisesOpError("unexpected batch and event shape"):
        with self.test_session():
            poisson_141_reshaped.log_prob(x_114).eval()
class BatchReshapeStaticTest(_BatchReshapeTest, test.TestCase):
    # Variant with all shapes known at graph-construction time.
    dtype = np.float32
    is_static_shape = True
class BatchReshapeDynamicTest(_BatchReshapeTest, test.TestCase):
    # Variant where shapes are only known when the graph runs.
    dtype = np.float64
    is_static_shape = False
# Allow running this test file directly.
if __name__ == "__main__":
    test.main()
| apache-2.0 |
pkuyym/Paddle | benchmark/paddle/image/smallnet_mnist_cifar.py | 20 | 1367 | #!/usr/bin/env python
from paddle.trainer_config_helpers import *

# Benchmark config: a small 3-conv-layer CNN for 32x32 color images
# (MNIST/CIFAR scale) with 10 output classes.
height = 32
width = 32
num_class = 10
batch_size = get_config_arg('batch_size', int, 128)

args = {'height': height, 'width': width, 'color': True, 'num_class': num_class}
define_py_data_sources2(
    "train.list", None, module="provider", obj="process", args=args)

# Learning rate and L2 weight are scaled by batch size because the
# optimizer aggregates gradients over the whole batch.
settings(
    batch_size=batch_size,
    learning_rate=0.01 / batch_size,
    learning_method=MomentumOptimizer(0.9),
    regularization=L2Regularization(0.0005 * batch_size))

# conv1: 5x5, 32 filters, followed by a 3x3 stride-2 pool
net = data_layer('data', size=height * width * 3)
net = img_conv_layer(
    input=net,
    filter_size=5,
    num_channels=3,
    num_filters=32,
    stride=1,
    padding=2)
net = img_pool_layer(input=net, pool_size=3, stride=2, padding=1)

# conv2: 5x5, 32 filters, followed by a 3x3 stride-2 average pool
net = img_conv_layer(
    input=net, filter_size=5, num_filters=32, stride=1, padding=2)
net = img_pool_layer(
    input=net, pool_size=3, stride=2, padding=1, pool_type=AvgPooling())

# conv3: 3x3, 64 filters, followed by a 3x3 stride-2 average pool
net = img_conv_layer(
    input=net, filter_size=3, num_filters=64, stride=1, padding=1)
net = img_pool_layer(
    input=net, pool_size=3, stride=2, padding=1, pool_type=AvgPooling())

# Fully-connected head: 64-unit ReLU layer, then a 10-way softmax.
net = fc_layer(input=net, size=64, act=ReluActivation())
net = fc_layer(input=net, size=10, act=SoftmaxActivation())

lab = data_layer('label', num_class)
loss = classification_cost(input=net, label=lab)
outputs(loss)
| apache-2.0 |
meee1/ardupilot | Tools/scripts/rcda_decode.py | 22 | 1487 | #!/usr/bin/env python
'''
decode RCDA messages from a log and optionally play them back to a serial
port. The RCDA message captures RC input bytes when RC_OPTIONS=16 is set
'''
import struct

from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("--baudrate", type=int, default=115200, help="baudrate")
parser.add_argument("--port", type=str, default=None, help="port")
parser.add_argument("--delay-mul", type=float, default=1.0, help="delay multiplier")
parser.add_argument("log", metavar="LOG")

import time
import serial

args = parser.parse_args()

from pymavlink import mavutil

print("Processing log %s" % args.log)

mlog = mavutil.mavlink_connection(args.log)

if args.port:
    port = serial.Serial(args.port, args.baudrate, timeout=1.0)

# Timestamp (microseconds) of the previous RCDA message; -1 means "none yet".
tlast = -1

while True:
    msg = mlog.recv_match(type=['RCDA'], condition=args.condition)
    if msg is None:
        # End of log: rewind and replay forever.
        mlog.rewind()
        tlast = -1
        continue
    tnow = msg.TimeUS
    if tlast == -1:
        tlast = tnow
    # Up to 40 raw RC bytes are packed little-endian into ten uint32 fields;
    # msg.Len says how many of those bytes are valid.
    buf = struct.pack("<IIIIIIIIII",
                      msg.U0, msg.U1, msg.U2, msg.U3, msg.U4,
                      msg.U5, msg.U6, msg.U7, msg.U8, msg.U9)[0:msg.Len]
    # bytearray() yields ints on both Python 2 and 3. The previous
    # `[ord(b) for b in buf]` raised TypeError on Python 3, where iterating
    # bytes already produces ints.
    ibuf = list(bytearray(buf))
    dt = tnow - tlast
    tlast = tnow
    print(len(ibuf), ibuf, dt)
    if args.port:
        # Pace the replay at (scaled) real time before forwarding the bytes.
        time.sleep(dt*1.0e-6*args.delay_mul)
        port.write(buf)
| gpl-3.0 |
dhruvsrivastava/OJ | flask/lib/python2.7/site-packages/alembic/ddl/base.py | 40 | 5924 | import functools
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import DDLElement, Column
from sqlalchemy import Integer
from sqlalchemy import types as sqltypes
from .. import util
# backwards compat
from ..util.sqla_compat import ( # noqa
_table_for_constraint,
_columns_for_constraint, _fk_spec, _is_type_bound, _find_columns)
if util.sqla_09:
from sqlalchemy.sql.elements import quoted_name
class AlterTable(DDLElement):
    """Represent an ALTER TABLE statement.

    Only the string name and optional schema name of the table
    is required, not a full Table object.
    """

    def __init__(self, table_name, schema=None):
        self.table_name = table_name
        self.schema = schema
class RenameTable(AlterTable):
    """ALTER TABLE ... RENAME TO: carries the new table name."""

    def __init__(self, old_table_name, new_table_name, schema=None):
        super(RenameTable, self).__init__(old_table_name, schema=schema)
        self.new_table_name = new_table_name
class AlterColumn(AlterTable):
    """Base for ALTER TABLE ... ALTER COLUMN statements.

    The ``existing_*`` attributes describe the column as it currently is,
    for dialects that need to restate them while altering.
    """

    def __init__(self, name, column_name, schema=None,
                 existing_type=None,
                 existing_nullable=None,
                 existing_server_default=None):
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        # Normalize a type class (e.g. String) into a type instance.
        self.existing_type = sqltypes.to_instance(existing_type) \
            if existing_type is not None else None
        self.existing_nullable = existing_nullable
        self.existing_server_default = existing_server_default
class ColumnNullable(AlterColumn):
    """ALTER COLUMN ... SET/DROP NOT NULL."""

    def __init__(self, name, column_name, nullable, **kw):
        super(ColumnNullable, self).__init__(name, column_name,
                                             **kw)
        self.nullable = nullable
class ColumnType(AlterColumn):
    """ALTER COLUMN ... TYPE <type>."""

    def __init__(self, name, column_name, type_, **kw):
        super(ColumnType, self).__init__(name, column_name,
                                         **kw)
        # Normalize a type class into a type instance.
        self.type_ = sqltypes.to_instance(type_)
class ColumnName(AlterColumn):
    """ALTER TABLE ... RENAME <column> TO <newname>."""

    def __init__(self, name, column_name, newname, **kw):
        super(ColumnName, self).__init__(name, column_name, **kw)
        self.newname = newname
class ColumnDefault(AlterColumn):
    """ALTER COLUMN ... SET DEFAULT / DROP DEFAULT (default=None drops)."""

    def __init__(self, name, column_name, default, **kw):
        super(ColumnDefault, self).__init__(name, column_name, **kw)
        self.default = default
class AddColumn(AlterTable):
    """ALTER TABLE ... ADD COLUMN, carrying a full Column object."""

    def __init__(self, name, column, schema=None):
        super(AddColumn, self).__init__(name, schema=schema)
        self.column = column
class DropColumn(AlterTable):
    """ALTER TABLE ... DROP COLUMN, carrying a full Column object."""

    def __init__(self, name, column, schema=None):
        super(DropColumn, self).__init__(name, schema=schema)
        self.column = column
@compiles(RenameTable)
def visit_rename_table(element, compiler, **kw):
    # ALTER TABLE <old> RENAME TO <new>
    return "%s RENAME TO %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_table_name(compiler, element.new_table_name, element.schema)
    )
@compiles(AddColumn)
def visit_add_column(element, compiler, **kw):
    # ALTER TABLE <table> ADD COLUMN <column spec>
    return "%s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        add_column(compiler, element.column, **kw)
    )
@compiles(DropColumn)
def visit_drop_column(element, compiler, **kw):
    # ALTER TABLE <table> DROP COLUMN <name>
    return "%s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        drop_column(compiler, element.column.name, **kw)
    )
@compiles(ColumnNullable)
def visit_column_nullable(element, compiler, **kw):
    # DROP NOT NULL makes the column nullable; SET NOT NULL forbids NULLs.
    return "%s %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        alter_column(compiler, element.column_name),
        "DROP NOT NULL" if element.nullable else "SET NOT NULL"
    )
@compiles(ColumnType)
def visit_column_type(element, compiler, **kw):
    # ALTER COLUMN <name> TYPE <rendered type>
    return "%s %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        alter_column(compiler, element.column_name),
        "TYPE %s" % format_type(compiler, element.type_)
    )
@compiles(ColumnName)
def visit_column_name(element, compiler, **kw):
    # ALTER TABLE <table> RENAME <old column> TO <new column>
    return "%s RENAME %s TO %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        format_column_name(compiler, element.newname)
    )
@compiles(ColumnDefault)
def visit_column_default(element, compiler, **kw):
    # SET DEFAULT <expr> when a default is given, otherwise DROP DEFAULT.
    return "%s %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        alter_column(compiler, element.column_name),
        "SET DEFAULT %s" %
        format_server_default(compiler, element.default)
        if element.default is not None
        else "DROP DEFAULT"
    )
def quote_dotted(name, quote):
    """Quote each dot-separated component of *name* using *quote*."""
    if util.sqla_09 and isinstance(name, quoted_name):
        # quoted_name objects carry their own quoting preference, so hand
        # the whole value to the preparer rather than splitting it.
        return quote(name)
    return ".".join(quote(part) for part in name.split("."))
def format_table_name(compiler, name, schema):
    """Render an optionally schema-qualified, quoted table name."""
    def quote(ident):
        return compiler.preparer.quote(ident, force=None)

    if not schema:
        return quote(name)
    # Quote every dotted component of the schema, then append the table.
    return "%s.%s" % (quote_dotted(schema, quote), quote(name))
def format_column_name(compiler, name):
    """Quote a column name using the dialect's identifier preparer."""
    preparer = compiler.preparer
    return preparer.quote(name, None)
def format_server_default(compiler, default):
    # Wrap the default in a throwaway Column so the dialect compiler can
    # render it the same way it would for CREATE TABLE.
    return compiler.get_column_default_string(
        Column("x", Integer, server_default=default)
    )
def format_type(compiler, type_):
    """Render a SQL type via the dialect's type compiler."""
    type_compiler = compiler.dialect.type_compiler
    return type_compiler.process(type_)
def alter_table(compiler, name, schema):
    # Common "ALTER TABLE <name>" prefix shared by every statement above.
    return "ALTER TABLE %s" % format_table_name(compiler, name, schema)
def drop_column(compiler, name):
    # "DROP COLUMN <quoted name>" fragment.
    return 'DROP COLUMN %s' % format_column_name(compiler, name)
def alter_column(compiler, name):
    # "ALTER COLUMN <quoted name>" fragment.
    return 'ALTER COLUMN %s' % format_column_name(compiler, name)
def add_column(compiler, column, **kw):
    # "ADD COLUMN <full column specification>" fragment.
    return "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
| bsd-3-clause |
nuuuboo/odoo | addons/hr_timesheet_invoice/report/hr_timesheet_invoice_report.py | 318 | 9494 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.sql import drop_view_if_exists
class report_timesheet_line(osv.osv):
    """Read-only reporting model over analytic timesheet lines."""
    _name = "report.timesheet.line"
    _description = "Timesheet Line"
    _auto = False  # backed by the SQL view created in init(), not a table
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        'date': fields.date('Date', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'cost': fields.float('Cost', readonly=True),
        'product_id': fields.many2one('product.product', 'Product',readonly=True),
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'general_account_id': fields.many2one('account.account', 'General Account', readonly=True),
        'invoice_id': fields.many2one('account.invoice', 'Invoiced', readonly=True),
        'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                   ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc,user_id desc'

    def init(self, cr):
        # (Re)create the aggregation view over account_analytic_line.
        drop_view_if_exists(cr, 'report_timesheet_line')
        cr.execute("""
            create or replace view report_timesheet_line as (
                select
                    min(l.id) as id,
                    l.date as date,
                    to_char(l.date,'YYYY') as name,
                    to_char(l.date,'MM') as month,
                    l.user_id,
                    to_char(l.date, 'YYYY-MM-DD') as day,
                    l.invoice_id,
                    l.product_id,
                    l.account_id,
                    l.general_account_id,
                    sum(l.unit_amount) as quantity,
                    sum(l.amount) as cost
                from
                    account_analytic_line l
                where
                    l.user_id is not null
                group by
                    l.date,
                    l.user_id,
                    l.product_id,
                    l.account_id,
                    l.general_account_id,
                    l.invoice_id
            )
        """)
class report_timesheet_user(osv.osv):
    """Timesheet time/cost aggregated per user, year and month."""
    _name = "report_timesheet.user"
    _description = "Timesheet per day"
    _auto = False  # backed by the SQL view created in init()
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'cost': fields.float('Cost', readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                  ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc,user_id desc'

    def init(self, cr):
        drop_view_if_exists(cr, 'report_timesheet_user')
        cr.execute("""
            create or replace view report_timesheet_user as (
                select
                    min(l.id) as id,
                    to_char(l.date,'YYYY') as name,
                    to_char(l.date,'MM') as month,
                    l.user_id,
                    sum(l.unit_amount) as quantity,
                    sum(l.amount) as cost
                from
                    account_analytic_line l
                where
                    user_id is not null
                group by l.date, to_char(l.date,'YYYY'),to_char(l.date,'MM'), l.user_id
            )
        """)
class report_timesheet_account(osv.osv):
    """Timesheet time aggregated per analytic account (by creation date)."""
    _name = "report_timesheet.account"
    _description = "Timesheet per account"
    _auto = False  # backed by the SQL view created in init()
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                  ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc,account_id desc,user_id desc'

    def init(self, cr):
        # Note: this view groups by create_date, not the timesheet date.
        drop_view_if_exists(cr, 'report_timesheet_account')
        cr.execute("""
            create or replace view report_timesheet_account as (
                select
                    min(id) as id,
                    to_char(create_date, 'YYYY') as name,
                    to_char(create_date,'MM') as month,
                    user_id,
                    account_id,
                    sum(unit_amount) as quantity
                from
                    account_analytic_line
                group by
                    to_char(create_date, 'YYYY'),to_char(create_date, 'MM'), user_id, account_id
            )
        """)
class report_timesheet_account_date(osv.osv):
    """Timesheet time aggregated per analytic account and timesheet date."""
    _name = "report_timesheet.account.date"
    _description = "Daily timesheet per account"
    _auto = False  # backed by the SQL view created in init()
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                  ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc,account_id desc,user_id desc'

    def init(self, cr):
        drop_view_if_exists(cr, 'report_timesheet_account_date')
        cr.execute("""
            create or replace view report_timesheet_account_date as (
                select
                    min(id) as id,
                    to_char(date,'YYYY') as name,
                    to_char(date,'MM') as month,
                    user_id,
                    account_id,
                    sum(unit_amount) as quantity
                from
                    account_analytic_line
                group by
                    to_char(date,'YYYY'),to_char(date,'MM'), user_id, account_id
            )
        """)
class report_timesheet_invoice(osv.osv):
    """Uninvoiced timesheet costs per user/project, with amount to invoice."""
    _name = "report_timesheet.invoice"
    _description = "Costs to invoice"
    _auto = False  # backed by the SQL view created in init()
    _columns = {
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'account_id':fields.many2one('account.analytic.account', 'Project', readonly=True),
        'manager_id':fields.many2one('res.users', 'Manager', readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'amount_invoice': fields.float('To invoice', readonly=True)
    }
    _rec_name = 'user_id'
    _order = 'user_id desc'

    def init(self, cr):
        # NOTE(review): product_product and product_template are joined on
        # l.to_invoice=f.id -- the same condition as the invoice-factor join.
        # This looks like a copy/paste mistake; t.list_price presumably should
        # come from the product related to the analytic line. Confirm before
        # relying on amount_invoice.
        drop_view_if_exists(cr, 'report_timesheet_invoice')
        cr.execute("""
            create or replace view report_timesheet_invoice as (
                select
                    min(l.id) as id,
                    l.user_id as user_id,
                    l.account_id as account_id,
                    a.user_id as manager_id,
                    sum(l.unit_amount) as quantity,
                    sum(l.unit_amount * t.list_price) as amount_invoice
                from account_analytic_line l
                    left join hr_timesheet_invoice_factor f on (l.to_invoice=f.id)
                    left join account_analytic_account a on (l.account_id=a.id)
                    left join product_product p on (l.to_invoice=f.id)
                    left join product_template t on (l.to_invoice=f.id)
                where
                    l.to_invoice is not null and
                    l.invoice_id is null
                group by
                    l.user_id,
                    l.account_id,
                    a.user_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FusionSP/android_external_chromium_org | tools/telemetry/telemetry/core/platform/power_monitor/sysfs_power_monitor_unittest.py | 25 | 5761 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform.power_monitor import sysfs_power_monitor
class SysfsPowerMonitorMonitorTest(unittest.TestCase):
    """Tests for SysfsPowerMonitor frequency parsing and CPU time stats."""

    # Raw cpufreq time_in_state samples, per CPU: "freq_kHz count" pairs.
    initial_freq = {
        'cpu0': '1700000 6227\n1600000 0\n1500000 0\n1400000 28\n1300000 22\n'
                '1200000 14\n1100000 19\n1000000 22\n900000 14\n800000 20\n'
                '700000 15\n600000 23\n500000 23\n400000 9\n300000 28\n200000 179',
        'cpu1': '1700000 11491\n1600000 0\n1500000 0\n1400000 248\n1300000 1166\n'
                '1200000 2082\n1100000 2943\n1000000 6560\n900000 12517\n'
                '800000 8690\n700000 5105\n600000 3800\n500000 5131\n400000 5479\n'
                '300000 7571\n200000 133618',
        'cpu2': '1700000 1131',
        'cpu3': '1700000 1131'
    }
    # Final sample; cpu2/cpu3 exercise the missing/empty-sample edge cases.
    final_freq = {
        'cpu0': '1700000 7159\n1600000 0\n1500000 0\n1400000 68\n1300000 134\n'
                '1200000 194\n1100000 296\n1000000 716\n900000 1301\n800000 851\n'
                '700000 554\n600000 343\n500000 612\n400000 691\n300000 855\n'
                '200000 15525',
        'cpu1': '1700000 12048\n1600000 0\n1500000 0\n1400000 280\n1300000 1267\n'
                '1200000 2272\n1100000 3163\n1000000 7039\n900000 13800\n'
                '800000 9599\n700000 5655\n600000 4144\n500000 5655\n400000 6005\n'
                '300000 8288\n200000 149724',
        'cpu2': None,
        'cpu3': ''
    }
    # Parsed form of initial_freq: {cpu: {freq_Hz: count}} (kHz * 1000).
    expected_initial_freq = {
        'cpu0': {
            1700000000: 6227,
            1600000000: 0,
            1500000000: 0,
            1400000000: 28,
            1300000000: 22,
            1200000000: 14,
            1100000000: 19,
            1000000000: 22,
            900000000: 14,
            800000000: 20,
            700000000: 15,
            600000000: 23,
            500000000: 23,
            400000000: 9,
            300000000: 28,
            200000000: 179
        },
        'cpu1': {
            1700000000: 11491,
            1600000000: 0,
            1500000000: 0,
            1400000000: 248,
            1300000000: 1166,
            1200000000: 2082,
            1100000000: 2943,
            1000000000: 6560,
            900000000: 12517,
            800000000: 8690,
            700000000: 5105,
            600000000: 3800,
            500000000: 5131,
            400000000: 5479,
            300000000: 7571,
            200000000: 133618
        },
        'cpu2': {
            1700000000: 1131
        },
        'cpu3': {
            1700000000: 1131
        }
    }
    # Parsed form of final_freq; None and '' stay None / empty dict.
    expected_final_freq = {
        'cpu0': {
            1700000000: 7159,
            1600000000: 0,
            1500000000: 0,
            1400000000: 68,
            1300000000: 134,
            1200000000: 194,
            1100000000: 296,
            1000000000: 716,
            900000000: 1301,
            800000000: 851,
            700000000: 554,
            600000000: 343,
            500000000: 612,
            400000000: 691,
            300000000: 855,
            200000000: 15525
        },
        'cpu1': {
            1700000000: 12048,
            1600000000: 0,
            1500000000: 0,
            1400000000: 280,
            1300000000: 1267,
            1200000000: 2272,
            1100000000: 3163,
            1000000000: 7039,
            900000000: 13800,
            800000000: 9599,
            700000000: 5655,
            600000000: 4144,
            500000000: 5655,
            400000000: 6005,
            300000000: 8288,
            200000000: 149724
        },
        'cpu2': None,
        'cpu3': {}
    }
    # Expected percentage of time spent at each frequency between the two
    # samples, per CPU plus the package-wide aggregate.
    expected_freq_percents = {
        'whole_package': {
            1700000000: 3.29254111574526,
            1600000000: 0.0,
            1500000000: 0.0,
            1400000000: 0.15926805099535601,
            1300000000: 0.47124116307273645,
            1200000000: 0.818756100807525,
            1100000000: 1.099381692400982,
            1000000000: 2.5942528544384302,
            900000000: 5.68661122326737,
            800000000: 3.850545467654628,
            700000000: 2.409691872245393,
            600000000: 1.4693702487650486,
            500000000: 2.4623575553879373,
            400000000: 2.672038150383057,
            300000000: 3.415770495015825,
            200000000: 69.59817400982045
        },
        'cpu0': {
            1700000000: 4.113700564971752,
            1600000000: 0.0,
            1500000000: 0.0,
            1400000000: 0.1765536723163842,
            1300000000: 0.4943502824858757,
            1200000000: 0.7944915254237288,
            1100000000: 1.2226341807909604,
            1000000000: 3.0632062146892656,
            900000000: 5.680614406779661,
            800000000: 3.6679025423728815,
            700000000: 2.379060734463277,
            600000000: 1.4124293785310735,
            500000000: 2.599752824858757,
            400000000: 3.0102401129943503,
            300000000: 3.650247175141243,
            200000000: 67.73481638418079
        },
        'cpu1': {
            1700000000: 2.4713816665187682,
            1600000000: 0.0,
            1500000000: 0.0,
            1400000000: 0.1419824296743278,
            1300000000: 0.44813204365959713,
            1200000000: 0.8430206761913214,
            1100000000: 0.9761292040110037,
            1000000000: 2.1252994941875945,
            900000000: 5.69260803975508,
            800000000: 4.033188392936374,
            700000000: 2.4403230100275093,
            600000000: 1.526311118999024,
            500000000: 2.3249622859171177,
            400000000: 2.3338361877717633,
            300000000: 3.1812938148904073,
            200000000: 71.46153163546012
        },
        'cpu2': {
            1700000000: 0.0,
        },
        'cpu3': {
            1700000000: 0.0,
        }
    }

    def testParseCpuFreq(self):
        # ParseFreqSample must turn the raw text samples into Hz->count dicts.
        initial = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
            self.initial_freq)
        final = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
            self.final_freq)
        self.assertDictEqual(initial, self.expected_initial_freq)
        self.assertDictEqual(final, self.expected_final_freq)

    def testComputeCpuStats(self):
        # ComputeCpuStats must convert the delta between two parsed samples
        # into time-at-frequency percentages.
        results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
            self.expected_initial_freq, self.expected_final_freq)
        for cpu in self.expected_freq_percents:
            for freq in results[cpu]:
                self.assertAlmostEqual(results[cpu][freq],
                                       self.expected_freq_percents[cpu][freq])
| bsd-3-clause |
hydroffice/hyo_soundspeed | examples/soundspeed/ex_read_and_write.py | 1 | 1724 | import os
import logging

from hyo2.soundspeedmanager import app_info
from hyo2.soundspeed.soundspeed import SoundSpeedLibrary
from hyo2.soundspeed.base.testing import SoundSpeedTesting
from hyo2.soundspeed.base.callbacks.fake_callbacks import FakeCallbacks
from hyo2.abc.lib.logging import set_logging

# Example: import Valeport test profiles and re-export them in several
# sound-speed formats. (`import os` appears earlier in this file.)
ns_list = ["hyo2.soundspeed", "hyo2.soundspeedmanager", "hyo2.soundspeedsettings"]
set_logging(ns_list=ns_list)

logger = logging.getLogger(__name__)

# create a project with test-callbacks (no user interaction required)
lib = SoundSpeedLibrary(callbacks=FakeCallbacks())

# set the current project name
lib.setup.current_project = 'test'

# repository root, two levels above this examples folder
data_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
testing = SoundSpeedTesting(root_folder=data_folder)

# retrieve data input/output folders
data_input = testing.input_data_folder()
logger.info('input folder: %s' % data_input)
data_output = testing.output_data_folder()
logger.info('output folder: %s' % data_output)

# test readers/writers
logger.info('test: *** START ***')
filters = ["valeport", ]
formats = ["caris", "csv", "elac", "hypack", "ixblue", "asvp", "qps", "sonardyne", "unb", ]
# Every output format writes into the same output folder.
# NOTE(review): the loop variable shadows the builtin `format`.
data_outputs = dict()
for format in formats:
    data_outputs[format] = data_output
tests = testing.input_dict_test_files(inclusive_filters=filters)
# print(tests)

# import each identified file, then export it in every format
for idx, testfile in enumerate(tests.keys()):
    logger.info("test: * New profile: #%03d *" % idx)
    # import
    lib.import_data(data_path=testfile, data_format=tests[testfile].name)
    # export
    # lib.export_data(data_path=data_output, data_formats=lib.name_writers)
    lib.export_data(data_paths=data_outputs, data_formats=formats)

logger.info('test: *** END ***')
| lgpl-2.1 |
tarzan0820/odoo | addons/account/project/__init__.py | 427 | 1100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
luisleao/hackathononibussp | scripts/gtfs_calculos.py | 1 | 8583 | #!/usr/bin/python
import sys
import os
import json
import pyes, pprint
import codecs
import math
from math import radians, sin, cos, sqrt, atan2
from datetime import datetime, timedelta
from os import listdir
from StringIO import StringIO
def save_file(filename, json_data):
    # Serialize `json_data` as JSON and write it to `filename`.
    # NOTE(review): the `encoding` keyword of json.dumps() exists only on
    # Python 2; this module is Python-2-only (print statements).
    print "salvando '%s'..." % filename
    with open(filename, "w") as the_file:
        the_file.write(json.dumps(json_data, encoding="utf-8"))  # was iso-8859-1
    print "file saved"
#iso-8859-1
# Module-level accumulators filled in by the get_* loader functions below.
lista_linhas = []   # typeahead records: {value, full_name, tokens}
linhas = {}         # route_id -> {id, name, sentidos}
stop_times = {}     # route_id -> sentido -> start/stop timing info
frequencies = {}
shapes = {}

R = 6371; # mean Earth radius in km
def haversine_distance(p0, p1):
    """Great-circle distance in meters between two (lat, lon) points.

    Args:
        p0: (latitude, longitude) in decimal degrees.
        p1: (latitude, longitude) in decimal degrees.

    Returns:
        Distance along the Earth's surface in meters (haversine formula).
    """
    # Mean Earth radius in km. Kept local instead of relying on the
    # module-level mutable global `R` (value unchanged: 6371).
    earth_radius_km = 6371.0
    lat0, lon0 = p0
    lat1, lon1 = p1
    d_lat = radians(lat1 - lat0)
    d_lon = radians(lon1 - lon0)
    lat0_r = radians(lat0)
    lat1_r = radians(lat1)
    # Haversine formula: a is the squared half-chord length.
    a = sin(d_lat / 2) * sin(d_lat / 2) + \
        sin(d_lon / 2) * sin(d_lon / 2) * cos(lat0_r) * cos(lat1_r)
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    return earth_radius_km * c * 1000  # km -> m
def getTime(val):
    """Parse an 'HH:MM:SS' string (hours may exceed 23) into a timedelta.

    The original indexed the result of map(), which returns an iterator on
    Python 3 and raised TypeError; tuple unpacking works on both 2 and 3.
    """
    hours, minutes, seconds = (int(part) for part in val.split(':'))
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)
def get_routes(): #routes.txt
    """Load GTFS routes.txt into the module globals.

    Fills `linhas[route_id]` with {id, name, sentidos} records and appends
    tokenized typeahead entries to `lista_linhas`.
    """
    arquivo = "../amostra/SPTrans GTFS/routes.txt"
    with codecs.open(arquivo, 'r', encoding='UTF-8') as raw:
        # "route_id","agency_id","route_short_name","route_long_name","route_type",...
        # e.g. "1016-10","1","1016-10","Cemiterio Do Horto - Center Norte",3,"",""
        for a in raw.readlines()[1:]:  # skip the CSV header line
            # Naive split: assumes no commas inside quoted fields.
            b = a.replace('\r\n', '').split(',')
            route_id = b[0].replace("\"", "")
            name = b[3].replace("\"", "")
            # `dict.has_key` is Python-2-only; `in` works on both 2 and 3.
            if route_id not in linhas:
                linhas[route_id] = {
                    "id": route_id,
                    "name": name,
                    "sentidos": {},
                }
            tokens = name.replace("- ", "").split(" ")
            tokens.append(route_id)
            lista_linhas.append({
                "value": route_id,
                "full_name": name,
                "tokens": tokens
            })
def get_trips(): #trips.txt
    """Attach per-direction ("sentido") trip info to entries in `linhas`.

    For each trip row, records the headsign, shape id, service days, and
    zeroed counters that later processing steps fill in.
    """
    arquivo = "../amostra/SPTrans GTFS/trips.txt"
    with codecs.open(arquivo, 'r', encoding='UTF-8') as raw:
        # "route_id","service_id","trip_id","trip_headsign","direction_id","shape_id"
        # e.g. "1016-10","USD","1016-10-0","Center Norte",0,42746
        for a in raw.readlines()[1:]:  # skip the CSV header line
            b = a.replace('\r\n', '').split(',')
            route_id = b[0].replace("\"", "")
            working = b[1].replace("\"", "")
            name = b[3].replace("\"", "")
            sentido = b[4]
            shape_id = int(b[5])
            # `dict.has_key` is Python-2-only; `in` works on both 2 and 3.
            if route_id in linhas:
                item = linhas[route_id]
                item["sentidos"][sentido] = {
                    "name": name,            # GTFS:trips - headsign
                    "shape_id": shape_id,    # GTFS:trips - shape id
                    "shapes": None,          # GTFS:shapes array, filled later
                    "travel_time": 0,        # seconds, from GTFS:stop_times
                    "travel_discance": 0,    # (sic) meters, from GTFS:shapes
                    "travels": [0] * 24,     # per-hour trips: 3600/headway_secs
                    "total_travels": 0,      # SUM(3600/headway_secs)
                    "working": working       # service days: U/S/D
                }
def get_travel_times(): #stop_times.txt
    """Populate `stop_times` from GTFS stop_times.txt, then derive each
    route/direction's trip duration in seconds into `linhas`.

    trip_id is '<route>-<direction>', e.g. '978J-10-0' -> route
    '978J-10', direction '0'.  The naive split(',') parse assumes no
    quoted commas in the data (true for the SPTrans sample).
    """
    arquivo = "../amostra/SPTrans GTFS/stop_times.txt"
    with codecs.open(arquivo, 'r', encoding='UTF-8') as raw:
        for a in raw.readlines()[1:]:
            b = a.replace('\r\n', '').split(',')
            # "trip_id","arrival_time","departure_time","stop_id","stop_sequence"
            # "978J-10-0","06:11:54","06:11:54",410003639,8
            id_completo = b[0].replace("\"", "")
            id = id_completo[0:-2]
            sentido = id_completo[-1:]
            arrival_time = b[1].replace("\"", "")
            departure_time = b[2].replace("\"", "")
            stop_id = b[3]
            # BUG FIX: stop_sequence arrives as a string and was compared
            # against integers ('1' == 1 is False), so the trip start time
            # was never recorded.  Convert it once, tolerating quotes and
            # a trailing newline.
            stop_sequence = int(b[4].replace("\"", "").strip() or 0)
            arrival_timestamp = getTime(arrival_time)
            departure_timestamp = getTime(departure_time)
            if id not in stop_times:
                stop_times[id] = {}
            item = stop_times[id]
            if sentido not in item:
                item[sentido] = {
                    "start": timedelta(),
                    "stop": timedelta(),
                    "last_sequence": 0,
                    "duration": 0
                }
            # First stop of the trip marks the departure time...
            if stop_sequence == 1:
                item[sentido]["start"] = arrival_timestamp
            # ...and the highest sequence seen so far marks the arrival.
            if stop_sequence > item[sentido]["last_sequence"]:
                item[sentido]["last_sequence"] = stop_sequence
                item[sentido]["stop"] = departure_timestamp

    # Derive the trip duration (seconds) per route and direction.
    for id in stop_times:
        if id in linhas:
            linha = linhas[id]
            item = stop_times[id]
            for sentido in item:
                travel_time = item[sentido]["stop"] - item[sentido]["start"]
                linha["sentidos"][sentido]["travel_time"] = travel_time.total_seconds()
def get_frequencies(): #frequencies.txt"
    """Load GTFS frequencies.txt and record, per route/direction, the
    number of departures for each hour of the day.
    """
    arquivo = "../amostra/SPTrans GTFS/frequencies.txt"
    with codecs.open(arquivo, 'r', encoding='UTF-8') as raw:
        for a in raw.readlines()[1:]:
            b = a.replace('\r\n', '').split(',')
            # "trip_id","start_time","end_time","headway_secs"
            # "1016-10-0","00:00:00","00:59:00",1800
            id_completo = b[0].replace("\"", "")
            id = id_completo[0:-2]       # route id, e.g. "1016-10"
            sentido = id_completo[-1:]   # direction digit, "0" or "1"
            start_time = b[1].replace("\"", "")
            end_time = b[2].replace("\"", "")   # unused; rows are bucketed by start hour only
            headway_secs = int(b[3])
            # Departures per hour.  NOTE(review): under Python 2 this is
            # integer division and truncates (3600/1700 -> 2); presumably
            # intended -- confirm before porting to Python 3.
            travels = 3600/headway_secs
            # Hour-of-day bucket (0-23) this frequency row starts in.
            start_timestamp = int(getTime(start_time).total_seconds() // 3600)
            if linhas.has_key(id):
                linha = linhas[id]
                if linha["sentidos"].has_key(sentido):
                    item = linha["sentidos"][sentido]
                    item["total_travels"] += travels
                    item["travels"][start_timestamp] = travels
def get_shapes(): #shapes.txt
    """Populate `shapes` from GTFS shapes.txt, accumulating each shape's
    travelled distance, then attach each shape to its route/direction.

    BUG FIX: the previous version reset p0/p1 to None on every row, so
    haversine_distance() was never called and every segment distance --
    and therefore total_distance_traveled -- came out as 0.  The segment
    start is now the shape's previously stored point, which also keeps
    the math correct if rows of different shapes are interleaved.

    NOTE(review): assumes rows within a shape appear in ascending
    shape_pt_sequence order -- TODO confirm for the SPTrans feed.
    """
    arquivo = "../amostra/SPTrans GTFS/shapes.txt"
    with codecs.open(arquivo, 'r', encoding='UTF-8') as raw:
        for a in raw.readlines()[1:]:
            b = a.replace('\r\n', '').split(',')
            # "shape_id","shape_pt_lat","shape_pt_lon","shape_pt_sequence","shape_dist_traveled"
            # 42746,-23.446799,-46.611059,1,0
            shape_id = int(b[0])
            shape_pt_lat = float(b[1])
            shape_pt_lon = float(b[2])
            shape_pt_sequence = int(b[3])
            if shape_id not in shapes:
                shapes[shape_id] = {
                    "total_distance_traveled": 0,
                    "last_point": 0,
                    "points": []
                }
            shape = shapes[shape_id]
            p1 = (shape_pt_lat, shape_pt_lon)
            if shape["points"]:
                # Distance from the previous point of this same shape.
                last = shape["points"][-1]
                d = haversine_distance((last["lat"], last["lng"]), p1)
            else:
                # First point of the shape: no previous segment.
                d = 0
            shape["last_point"] = shape_pt_sequence
            shape["total_distance_traveled"] = shape["total_distance_traveled"] + d
            shape["points"].append({
                "lat": shape_pt_lat,
                "lng": shape_pt_lon,
                "sequence": shape_pt_sequence,
                "distance_traveled": d
            })

    # Attach the accumulated shape data to each route direction.
    for id in linhas:
        linha = linhas[id]
        for sentido in linha["sentidos"]:
            item = linha["sentidos"][sentido]
            if item["shape_id"] in shapes:
                linha["sentidos"][sentido]["shapes"] = shapes[item["shape_id"]]
# Build the in-memory model from the GTFS files, then write the JSON
# consumed by the web front-end.
get_routes()
get_trips()
get_travel_times()
get_frequencies()

# linhas.json is written *before* get_shapes() runs, so shape geometry
# only appears in the per-route files saved below.
save_file("../web/data/linhas.json", linhas)
save_file("../web/data/lista_linhas.json", lista_linhas)

get_shapes()

for id in linhas:
    save_file("../web/data/linhas/%s.json" % id, linhas[id])

print "*** FIM ***"
def distance_on_unit_sphere(lat1, long1, lat2, long2):
    """Arc length (in radians) between two lat/long points on a unit sphere.

    Multiply the result by the sphere radius in your favorite unit to get
    an actual distance (e.g. 6371 km for Earth).

    Args:
        lat1, long1: first point, decimal degrees.
        lat2, long2: second point, decimal degrees.

    Returns:
        Central angle between the two points, in radians.
    """
    # Convert latitude and longitude to spherical coordinates in radians.
    degrees_to_radians = math.pi / 180.0

    # phi = 90 - latitude (colatitude)
    phi1 = (90.0 - lat1) * degrees_to_radians
    phi2 = (90.0 - lat2) * degrees_to_radians

    # theta = longitude
    theta1 = long1 * degrees_to_radians
    theta2 = long2 * degrees_to_radians

    # Spherical law of cosines:
    # cosine(arc length) = sin phi sin phi' cos(theta-theta') + cos phi cos phi'
    # (renamed from 'cos' to avoid shadowing math.cos / the module-level
    # 'from math import cos' binding)
    cos_arc = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) +
               math.cos(phi1) * math.cos(phi2))
    # BUG FIX: floating-point rounding can push the cosine slightly outside
    # [-1, 1] (e.g. for identical points), making math.acos raise
    # ValueError.  Clamp before taking the arc cosine.
    cos_arc = max(-1.0, min(1.0, cos_arc))
    arc = math.acos(cos_arc)

    return arc
tomduijf/home-assistant | homeassistant/components/device_tracker/netgear.py | 7 | 3242 | """
homeassistant.components.device_tracker.netgear
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a Netgear router for device
presence.
Configuration:
To use the Netgear tracker you will need to add something like the following
to your configuration.yaml file.
device_tracker:
platform: netgear
host: YOUR_ROUTER_IP
username: YOUR_ADMIN_USERNAME
password: YOUR_ADMIN_PASSWORD
Variables:
host
*Required
The IP address of your router, e.g. 192.168.1.1.
username
*Required
The username of an user with administrative privileges, usually 'admin'.
password
*Required
The password for your given admin account.
"""
import logging
from datetime import timedelta
import threading
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.util import Throttle
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if last scan was less than this time ago
# Cache window enforced by @Throttle on NetgearDeviceScanner._update_info.
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)

_LOGGER = logging.getLogger(__name__)

# PyPI dependency installed on demand by Home Assistant.
REQUIREMENTS = ['pynetgear==0.3']
def get_scanner(hass, config):
    """ Validates config and returns a Netgear scanner. """
    conf = config[DOMAIN]

    host = conf.get(CONF_HOST)
    username = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASSWORD)

    # A password without a host is not enough to reach the router.
    if host is None and password is not None:
        _LOGGER.warning('Found username or password but no host')
        return None

    scanner = NetgearDeviceScanner(host, username, password)

    if not scanner.success_init:
        return None
    return scanner
class NetgearDeviceScanner(object):
    """ This class queries a Netgear wireless router using the SOAP-API. """

    def __init__(self, host, username, password):
        # Deferred import: pynetgear is only needed when this platform
        # is actually configured.
        import pynetgear

        self.last_results = []           # devices found by the most recent scan
        self.lock = threading.Lock()     # serializes router queries

        # pynetgear supplies sensible defaults for whichever of
        # host/username were omitted from the configuration.
        if host is None:
            self._api = pynetgear.Netgear()
        elif username is None:
            self._api = pynetgear.Netgear(password, host)
        else:
            self._api = pynetgear.Netgear(password, host, username)

        _LOGGER.info("Logging in")

        # The initial fetch doubles as a login check.
        results = self._api.get_attached_devices()

        self.success_init = results is not None

        if self.success_init:
            self.last_results = results
        else:
            _LOGGER.error("Failed to Login")

    def scan_devices(self):
        """
        Scans for new devices and return a list containing found device ids.
        """
        self._update_info()

        # Generator of MAC addresses from the cached results.
        return (device.mac for device in self.last_results)

    def get_device_name(self, mac):
        """ Returns the name of the given device or None if we don't know. """
        try:
            return next(device.name for device in self.last_results
                        if device.mac == mac)
        except StopIteration:
            return None

    @Throttle(MIN_TIME_BETWEEN_SCANS)
    def _update_info(self):
        """
        Retrieves latest information from the Netgear router.
        Returns boolean if scanning successful.

        The @Throttle decorator returns cached results if called again
        within MIN_TIME_BETWEEN_SCANS.
        """
        if not self.success_init:
            return

        with self.lock:
            _LOGGER.info("Scanning")

            # Keep last_results a list even if the router query fails.
            self.last_results = self._api.get_attached_devices() or []
| mit |
ezequielpereira/Time-Line | libs/wx/lib/floatcanvas/Utilities/Colors.py | 9 | 4097 | #!/usr/bin/env python
"""
Colors.py
Assorted stuff for Colors support. At the moment, only a few color sets.
Many of these are from:
http://geography.uoregon.edu/datagraphics/color_scales.htm
They may have been modified some
CategoricalColor1: A list of colors that are distinct.
RedToBlue11: 11 colors from red to blue
"""
## Categorical 12-step scheme, after ColorBrewer 11-step Paired Scheme
## From: http://geography.uoregon.edu/datagraphics/color_scales.htm
# CategoricalColor1 = [ (255, 191, 127),
# (255, 127, 0),
# (255, 255, 153),
# (255, 255, 50),
# (178, 255, 140),
# ( 50, 255, 0),
# (165, 237, 255),
# (25, 178, 255),
# (204, 191, 255),
# (101, 76, 255),
# (255, 153, 191),
# (229, 25, 50),
# ]
# 12 visually distinct RGB colors for categorical data, after the
# ColorBrewer 11-step Paired scheme (reordered relative to the
# commented-out original above).
CategoricalColor1 = [ (229, 25, 50),
                      (101, 76, 255),
                      ( 50, 255, 0),
                      (255, 127, 0),
                      (255, 255, 50),
                      (255, 153, 191),
                      (25, 178, 255),
                      (178, 255, 140),
                      (255, 191, 127),
                      (204, 191, 255),
                      (165, 237, 255),
                      (255, 255, 153),
                      ]

# 11-step diverging scheme, red through neutral to blue.
RedToBlue11 = [ (165, 0, 33),
                (216, 38, 50),
                (247, 109, 94),
                (255, 173, 114),
                (255, 224, 153),
                (255, 255, 191),
                (224, 255, 255),
                (170, 247, 255),
                (114, 216, 255),
                ( 63, 160, 255),
                ( 38, 76, 255),
                ]

# 12-step diverging scheme, blue through neutral to dark red.
BlueToDarkRed12 = [( 41, 10, 216),
                   ( 38, 77, 255),
                   ( 63, 160, 255),
                   (114, 217, 255),
                   (170, 247, 255),
                   (224, 255, 255),
                   (255, 255, 191),
                   (255, 224, 153),
                   (255, 173, 114),
                   (247, 109, 94),
                   (216, 38, 50),
                   (165, 0, 33),
                   ]

# 10-step variant: BlueToDarkRed12 with the two near-white middle steps removed.
BlueToDarkRed10 = [( 41, 10, 216),
                   ( 38, 77, 255),
                   ( 63, 160, 255),
                   (114, 217, 255),
                   (170, 247, 255),
                   (255, 224, 153),
                   (255, 173, 114),
                   (247, 109, 94),
                   (216, 38, 50),
                   (165, 0, 33),
                   ]

# 8-step variant: further trimmed toward the saturated ends.
BlueToDarkRed8 = [( 41, 10, 216),
                  ( 38, 77, 255),
                  ( 63, 160, 255),
                  (114, 217, 255),
                  (255, 173, 114),
                  (247, 109, 94),
                  (216, 38, 50),
                  (165, 0, 33),
                  ]
if __name__ == "__main__":
    import wx

    # Tiny demo app: one vertical column of swatches per color scheme.
    AllSchemes = [("CategoricalColor1", CategoricalColor1),
                  ("RedToBlue11", RedToBlue11),
                  ("BlueToDarkRed12", BlueToDarkRed12),
                  ("BlueToDarkRed10", BlueToDarkRed10),
                  ("BlueToDarkRed8", BlueToDarkRed8)
                  ]

    class TestFrame(wx.Frame):
        def __init__(self, *args, **kwargs):
            wx.Frame.__init__(self, *args, **kwargs)
            row_sizer = wx.BoxSizer(wx.HORIZONTAL)
            for scheme_name, scheme_colors in AllSchemes:
                column = wx.BoxSizer(wx.VERTICAL)
                column.Add(wx.StaticText(self, label=scheme_name), 0, wx.ALL, 5)
                for rgb in scheme_colors:
                    swatch = wx.Window(self, size=(100, 20))
                    swatch.SetBackgroundColour(wx.Colour(*rgb))
                    column.Add(swatch, 0, wx.ALL, 5)
                row_sizer.Add(column, 0, wx.ALL, 5)
            self.SetSizerAndFit(row_sizer)
            self.Show()

    A = wx.App(False)
    F = TestFrame(None)
    A.MainLoop()
| gpl-3.0 |
jspraul/bite-project | tools/bugs/server/appengine/providers/crawler_base.py | 17 | 1081 | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Crawler objects."""
__author__ = 'jason.stredwick@gmail.com (Jason Stredwick)'
class Error(Exception):
    """Base exception for crawler errors in this module."""
    pass
class CrawlerBase(object):
    """Common base for provider-specific bug crawlers.

    A crawler retrieves relevant bug data from one provider's database;
    subclasses implement Crawl() for their provider.

    Attributes:
      max_retries: The maximum number of attempts a crawl.
    """

    def __init__(self, max_retries=3):
        # How many times a crawl may be attempted before giving up.
        self.max_retries = max_retries

    def Crawl(self):
        # Subclasses must override with provider-specific crawling.
        raise NotImplementedError
| apache-2.0 |
salfab/CouchPotatoServer | couchpotato/core/providers/torrent/hdbits/main.py | 9 | 2136 | from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class HDBits(TorrentProvider):
    """CouchPotato torrent provider for the private tracker hdbits.org.

    Searches by IMDB id through the site's JSON API and scrapes the login
    form for its hidden anti-CSRF token.
    """

    # Endpoint templates; '%s' placeholders are filled in per request.
    urls = {
        'test' : 'https://hdbits.org/',
        'login' : 'https://hdbits.org/login/doLogin/',
        'detail' : 'https://hdbits.org/details.php?id=%s&source=browse',
        'search' : 'https://hdbits.org/json_search.php?imdb=%s',
        'download' : 'https://hdbits.org/download.php/%s.torrent?id=%s&passkey=%s&source=details.browse',
        'login_check': 'http://hdbits.org/inbox.php',
    }

    # Minimum delay between HTTP calls (rate limiting).
    http_time_between_calls = 1 #seconds

    def _search(self, movie, quality, results):
        """Search by IMDB id; append provider result dicts to `results`."""
        data = self.getJsonData(self.urls['search'] % movie['library']['identifier'], opener = self.login_opener)

        if data:
            try:
                for result in data:
                    results.append({
                        'id': result['id'],
                        'name': result['title'],
                        'url': self.urls['download'] % (result['id'], result['id'], self.conf('passkey')),
                        'detail_url': self.urls['detail'] % result['id'],
                        'size': self.parseSize(result['size']),
                        'seeders': tryInt(result['seeder']),
                        'leechers': tryInt(result['leecher'])
                    })
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; consider narrowing to Exception.
            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """Scrape the login page for the hidden anti-CSRF field ('lol')."""
        data = self.getHTMLData('https://hdbits.org/login')
        bs = BeautifulSoup(data)
        secret = bs.find('input', attrs = {'name': 'lol'})['value']

        return tryUrlencode({
            'uname': self.conf('username'),
            'password': self.conf('password'),
            'lol': secret
        })

    def loginSuccess(self, output):
        # A logged-in page contains a logout link.
        return '/logout.php' in output.lower()

    # The same check is used to validate an existing session.
    loginCheckSuccess = loginSuccess
| gpl-3.0 |
knowsis/django | django/test/client.py | 13 | 23477 | from __future__ import unicode_literals
import sys
import os
import re
import mimetypes
from copy import copy
from io import BytesIO
from django.conf import settings
from django.contrib.auth import authenticate, login, logout, get_user_model
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (request_started, request_finished,
got_request_exception)
from django.db import close_old_connections
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import force_bytes, force_str
from django.utils.http import urlencode
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.utils import six
from django.utils.six.moves.urllib.parse import unquote, urlparse, urlsplit
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
# Fixed multipart boundary used by the test client's form encoder.
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
# Extracts the charset from a Content-Type header,
# e.g. 'text/html; charset=utf-8' -> 'utf-8'.
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
    """
    A BytesIO wrapper that emulates a network request body: the data
    cannot be seeked, cannot be read past its content length, and cannot
    be written to once reading has begun.  This keeps views tested
    through the client from doing anything that wouldn't work in
    Real Life.
    """
    def __init__(self, content=None):
        self._buffer = BytesIO()
        self._remaining = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        return self._remaining

    def read(self, num_bytes=None):
        if not self.read_started:
            # First read: rewind so content written during setup is visible.
            self._buffer.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self._remaining or 0
        assert self._remaining >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        chunk = self._buffer.read(num_bytes)
        self._remaining -= num_bytes
        return chunk

    def write(self, content):
        if self.read_started:
            raise ValueError("Unable to write a payload after he's been read")
        data = force_bytes(content)
        self._buffer.write(data)
        self._remaining += len(data)
def closing_iterator_wrapper(iterable, close):
    """Wrap a streaming response's content so that ``close`` runs only
    after the content has been fully consumed.

    ``close`` fires the request_finished signal; close_old_connections is
    disconnected around the call (and reconnected afterwards) so the test
    database connection is not closed mid-request.
    """
    try:
        for item in iterable:
            yield item
    finally:
        request_finished.disconnect(close_old_connections)
        close()                                 # will fire request_finished
        request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
    """
    A HTTP Handler that can be used for testing purposes.
    Uses the WSGI interface to compose requests, but returns
    the raw HttpResponse object rather than a WSGI byte stream.
    """
    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
        # When True, CsrfViewMiddleware behaves as in production; the test
        # Client passes False by default so tests don't need CSRF tokens.
        self.enforce_csrf_checks = enforce_csrf_checks
        super(ClientHandler, self).__init__(*args, **kwargs)

    def __call__(self, environ):
        from django.conf import settings

        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()

        # Send request_started with close_old_connections temporarily
        # disconnected, so the test database connection survives between
        # requests.
        request_started.disconnect(close_old_connections)
        request_started.send(sender=self.__class__)
        request_started.connect(close_old_connections)
        request = WSGIRequest(environ)
        # sneaky little hack so that we can easily get round
        # CsrfViewMiddleware.  This makes life easier, and is probably
        # required for backwards compatibility with external tests against
        # admin views.
        request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
        response = self.get_response(request)
        # We're emulating a WSGI server; we must call the close method
        # on completion.
        if response.streaming:
            # Defer close() until the streaming content is exhausted.
            response.streaming_content = closing_iterator_wrapper(
                response.streaming_content, response.close)
        else:
            request_finished.disconnect(close_old_connections)
            response.close()                    # will fire request_finished
            request_finished.connect(close_old_connections)

        return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
    """
    Signal receiver that records every rendered template and its context
    into ``store``.

    The context is copied so it reflects its state at render time, not
    whatever it is mutated into later.
    """
    rendered_templates = store.setdefault('templates', [])
    rendered_templates.append(template)
    rendered_contexts = store.setdefault('context', ContextList())
    rendered_contexts.append(copy(context))
def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.

    The key will be used as the form data name; the value will be transmitted
    as content. If the value is a file, the contents of the file will be sent
    as an application/octet-stream; otherwise, str(value) will be sent.

    Returns the complete body as bytes, with CRLF line endings as required
    by the multipart/form-data wire format.
    """
    lines = []
    to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)

    # Not by any means perfect, but good enough for our purposes.
    is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)

    # Each bit of the multipart form data could be either a form value or a
    # file, or a *list* of form values and/or files. Remember that HTTP field
    # names can be duplicated!
    for (key, value) in data.items():
        if is_file(value):
            lines.extend(encode_file(boundary, key, value))
        elif not isinstance(value, six.string_types) and is_iterable(value):
            for item in value:
                if is_file(item):
                    lines.extend(encode_file(boundary, key, item))
                else:
                    lines.extend([to_bytes(val) for val in [
                        '--%s' % boundary,
                        'Content-Disposition: form-data; name="%s"' % key,
                        '',
                        item
                    ]])
        else:
            lines.extend([to_bytes(val) for val in [
                '--%s' % boundary,
                'Content-Disposition: form-data; name="%s"' % key,
                '',
                value
            ]])

    # Closing boundary marker plus a trailing CRLF.
    lines.extend([
        to_bytes('--%s--' % boundary),
        b'',
    ])
    return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
    """Return the multipart/form-data lines (as bytes) for one file upload."""
    to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)

    # An explicit content_type attribute on the file object wins; otherwise
    # guess from the filename, falling back to a generic binary type.
    if hasattr(file, 'content_type'):
        content_type = file.content_type
    else:
        content_type = mimetypes.guess_type(file.name)[0]
    if content_type is None:
        content_type = 'application/octet-stream'

    disposition = 'Content-Disposition: form-data; name="%s"; filename="%s"' \
        % (key, os.path.basename(file.name))
    return [
        to_bytes('--%s' % boundary),
        to_bytes(disposition),
        to_bytes('Content-Type: %s' % content_type),
        b'',
        file.read()
    ]
class RequestFactory(object):
    """
    Class that lets you create mock Request objects for use in testing.

    Usage:

    rf = RequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})

    Once you have a request object you can pass it to any view function,
    just as if that view had been hooked up using a URLconf.
    """
    def __init__(self, **defaults):
        # Extra environ entries applied to every request built by this factory.
        self.defaults = defaults
        self.cookies = SimpleCookie()
        self.errors = BytesIO()

    def _base_environ(self, **request):
        """
        The base environment for a request.
        """
        # This is a minimal valid WSGI environ dictionary, plus:
        # - HTTP_COOKIE: for cookie support,
        # - REMOTE_ADDR: often useful, see #8551.
        # See http://www.python.org/dev/peps/pep-3333/#environ-variables
        environ = {
            'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
            'PATH_INFO': str('/'),
            'REMOTE_ADDR': str('127.0.0.1'),
            'REQUEST_METHOD': str('GET'),
            'SCRIPT_NAME': str(''),
            'SERVER_NAME': str('testserver'),
            'SERVER_PORT': str('80'),
            'SERVER_PROTOCOL': str('HTTP/1.1'),
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': str('http'),
            'wsgi.input': FakePayload(b''),
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        # Per-factory defaults, then per-request overrides.
        environ.update(self.defaults)
        environ.update(request)
        return environ

    def request(self, **request):
        "Construct a generic request object."
        return WSGIRequest(self._base_environ(**request))

    def _encode_data(self, data, content_type, ):
        """Encode a request body as bytes for the given content type."""
        # NOTE(review): identity comparison -- this only matches when the
        # caller passed the MULTIPART_CONTENT module constant itself (or
        # left the default), not an equal but distinct string.  Presumably
        # intentional; confirm before changing.
        if content_type is MULTIPART_CONTENT:
            return encode_multipart(BOUNDARY, data)
        else:
            # Encode the content so that the byte representation is correct.
            match = CONTENT_TYPE_RE.match(content_type)
            if match:
                charset = match.group(1)
            else:
                charset = settings.DEFAULT_CHARSET
            return force_bytes(data, encoding=charset)

    def _get_path(self, parsed):
        """Build the PATH_INFO value from a urlparse() result."""
        path = force_str(parsed[2])
        # If there are parameters, add them
        if parsed[3]:
            path += str(";") + force_str(parsed[3])
        path = unquote(path)
        # WSGI requires latin-1 encoded strings. See get_path_info().
        if six.PY3:
            path = path.encode('utf-8').decode('iso-8859-1')
        return path

    def get(self, path, data={}, **extra):
        "Construct a GET request."
        parsed = urlparse(path)
        # Explicit data wins over any query string embedded in the path.
        query_string = urlencode(data, doseq=True) or force_str(parsed[4])
        if six.PY3:
            query_string = query_string.encode('utf-8').decode('iso-8859-1')
        r = {
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': query_string,
            'REQUEST_METHOD': str('GET'),
        }
        r.update(extra)
        return self.request(**r)

    def post(self, path, data={}, content_type=MULTIPART_CONTENT,
             **extra):
        "Construct a POST request."
        post_data = self._encode_data(data, content_type)

        parsed = urlparse(path)
        query_string = force_str(parsed[4])
        if six.PY3:
            query_string = query_string.encode('utf-8').decode('iso-8859-1')
        r = {
            'CONTENT_LENGTH': len(post_data),
            'CONTENT_TYPE': content_type,
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': query_string,
            'REQUEST_METHOD': str('POST'),
            'wsgi.input': FakePayload(post_data),
        }
        r.update(extra)
        return self.request(**r)

    def head(self, path, data={}, **extra):
        "Construct a HEAD request."
        parsed = urlparse(path)
        query_string = urlencode(data, doseq=True) or force_str(parsed[4])
        if six.PY3:
            query_string = query_string.encode('utf-8').decode('iso-8859-1')
        r = {
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': query_string,
            'REQUEST_METHOD': str('HEAD'),
        }
        r.update(extra)
        return self.request(**r)

    def options(self, path, data='', content_type='application/octet-stream',
                **extra):
        "Construct an OPTIONS request."
        return self.generic('OPTIONS', path, data, content_type, **extra)

    def put(self, path, data='', content_type='application/octet-stream',
            **extra):
        "Construct a PUT request."
        return self.generic('PUT', path, data, content_type, **extra)

    def patch(self, path, data='', content_type='application/octet-stream',
              **extra):
        "Construct a PATCH request."
        return self.generic('PATCH', path, data, content_type, **extra)

    def delete(self, path, data='', content_type='application/octet-stream',
               **extra):
        "Construct a DELETE request."
        return self.generic('DELETE', path, data, content_type, **extra)

    def generic(self, method, path,
                data='', content_type='application/octet-stream', **extra):
        """Construct an arbitrary HTTP request with the given method."""
        parsed = urlparse(path)
        data = force_bytes(data, settings.DEFAULT_CHARSET)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'REQUEST_METHOD': str(method),
        }
        if data:
            r.update({
                'CONTENT_LENGTH': len(data),
                'CONTENT_TYPE': str(content_type),
                'wsgi.input': FakePayload(data),
            })
        r.update(extra)
        # If QUERY_STRING is absent or empty, we want to extract it from the URL.
        if not r.get('QUERY_STRING'):
            query_string = force_bytes(parsed[4])
            # WSGI requires latin-1 encoded strings. See get_path_info().
            if six.PY3:
                query_string = query_string.decode('iso-8859-1')
            r['QUERY_STRING'] = query_string
        return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signals.template_rendered.connect(on_template_render, dispatch_uid="template-render")
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid="template-render")
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data={}, follow=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(
path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active \
and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
    def logout(self):
        """
        Removes the authenticated user's cookies and session object.
        Causes the authenticated user to be logged out.

        Safe to call when no user is logged in: an empty session still gets
        flushed and the cookie jar is reset either way.
        """
        request = HttpRequest()
        engine = import_module(settings.SESSION_ENGINE)
        UserModel = get_user_model()
        if self.session:
            request.session = self.session
            # Attach the logged-in user to the fake request so that logout
            # signal receivers see who is being logged out.
            uid = self.session.get("_auth_user_id")
            if uid:
                request.user = UserModel._default_manager.get(pk=uid)
        else:
            request.session = engine.SessionStore()
        logout(request)
        # Drop every cookie, not just the session one.
        self.cookies = SimpleCookie()
    def _handle_redirects(self, response, **extra):
        "Follows any redirects by requesting responses from the server using GET."
        # Each (url, status_code) hop is recorded so tests can inspect the
        # chain on the final response.
        response.redirect_chain = []
        # NOTE(review): 307 is re-requested with GET here even though RFC 7231
        # says 307 preserves the request method -- confirm this matches the
        # Django version this client targets before changing it.
        while response.status_code in (301, 302, 303, 307):
            url = response.url
            redirect_chain = response.redirect_chain
            redirect_chain.append((url, response.status_code))
            url = urlsplit(url)
            # Carry scheme/host/port of the redirect target into the WSGI
            # environ so absolute redirects are honored.
            if url.scheme:
                extra['wsgi.url_scheme'] = url.scheme
            if url.hostname:
                extra['SERVER_NAME'] = url.hostname
            if url.port:
                extra['SERVER_PORT'] = str(url.port)
            # follow=False avoids recursing; we iterate here instead.
            response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
            response.redirect_chain = redirect_chain
            # Prevent loops: stop once we revisit a (url, status) pair.
            if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
                break
        return response
| bsd-3-clause |
Nexenta/s3-tests | virtualenv/lib/python2.7/site-packages/requests/packages/chardet/eucjpprober.py | 215 | 3506 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from constants import eStart, eError, eItsMe
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import EUCJPDistributionAnalysis
from jpcntx import EUCJPContextAnalysis
from mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP.

    Combines a coding state machine with a character distribution analysis
    and a Japanese context analysis; the overall confidence is the max of
    the two analyzers. NOTE(review): this is Python 2 era chardet code --
    ``aBuf`` is assumed to be a byte string whose items index the state
    machine directly.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()
    def reset(self):
        # Reset both our own state machine (via the base class) and the
        # context analyzer; the distribution analyzer is reset by the base.
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        return "EUC-JP"
    def feed(self, aBuf):
        """Feed a chunk of bytes and return the current detection state."""
        # NOTE(review): an empty aBuf would make aBuf[aLen - 1] below raise
        # IndexError -- presumably callers never feed empty buffers; confirm.
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == eError:
                # Byte sequence is illegal for EUC-JP; rule this charset out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == eStart:
                # A full character was completed; feed the last two bytes
                # (carrying one over from the previous chunk when i == 0)
                # into both analyzers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i-1:i+1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
        # Remember the trailing byte for cross-chunk character assembly.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare a match early once we have enough data and
            # confidence is past the threshold.
            if self._mContextAnalyzer.got_enough_data() and \
                   (self.get_confidence() > constants.SHORTCUT_THRESHOLD):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        # Whichever analyzer is more certain wins.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
Laurawly/tvm-1 | python/tvm/topi/cuda/conv2d_nhwc.py | 2 | 4966 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Direct conv2d in NHWC layout"""
import tvm
from tvm import te
from tvm import autotvm
from ..utils import get_const_tuple
def schedule_conv2d_nhwc_direct(cfg, s, Conv):
    """Schedule optimized for NHWC direct conv2d on CUDA.

    Parameters
    ----------
    cfg : autotvm config entity
        Tuning knobs (tile/thread/vthread factors and the reduction step).
    s : tvm.te.Schedule
        The schedule, mutated in place.
    Conv : tvm.te.Tensor
        The conv2d output tensor in NHWC layout.
    """
    pad_data, kernel = s[Conv].op.input_tensors
    s[pad_data].compute_inline()
    # A dilated kernel stage is purely elementwise, so fold it into its consumer.
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    # If Conv is the final output we accumulate into a local cache; otherwise
    # Conv itself becomes the local stage and the real output is scheduled.
    if Conv.op in s.outputs:
        output = Conv
        OL = s.cache_write(Conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[Conv].set_scope("local")
        OL = Conv
    # create cache stage: shared-memory tiles plus per-thread registers
    AA = s.cache_read(pad_data, "shared", [OL])
    WW = s.cache_read(kernel, "shared", [OL])
    AL = s.cache_read(AA, "local", [OL])
    WL = s.cache_read(WW, "local", [OL])
    # Schedule for autotvm
    cfg.define_knob("tile_n", [2, 4, 8])
    cfg.define_knob("tile_c", [2, 4, 8])
    cfg.define_knob("num_thread_n", [4, 8, 16])
    cfg.define_knob("num_thread_c", [4, 8, 16])
    cfg.define_knob("vthread_n", [1, 2])
    cfg.define_knob("vthread_c", [1, 2])
    cfg.define_knob("step", [16, 3, 32, 64])  # NOTE(review): 3 stands out among powers of two -- confirm it isn't a typo for 8
    # fallback support: seed knobs from tophub reference logs when untuned
    target = tvm.target.Target.current()
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(
            target.kind.name, target.model, "conv2d_nhwc.cuda"
        )
        cfg.fallback_with_reference_log(ref_log)
    tile_n = cfg["tile_n"].val
    tile_c = cfg["tile_c"].val
    num_thread_n = cfg["num_thread_n"].val
    num_thread_c = cfg["num_thread_c"].val
    vthread_n = cfg["vthread_n"].val
    vthread_c = cfg["vthread_c"].val
    step = cfg["step"].val
    block_factor_c = tile_c * num_thread_c * vthread_c
    # Pad shared-memory rows by a small offset, presumably to avoid bank
    # conflicts -- confirm against the target GPU's bank width.
    offset = 8
    A_align = step + offset
    W_align = block_factor_c + offset
    block_x = te.thread_axis("blockIdx.x")
    block_y = te.thread_axis("blockIdx.y")
    block_z = te.thread_axis("blockIdx.z")
    thread_x = te.thread_axis((0, num_thread_c), "threadIdx.x")
    thread_y = te.thread_axis((0, num_thread_n), "threadIdx.y")
    thread_xz = te.thread_axis((0, vthread_c), "vthread", name="vx")
    thread_yz = te.thread_axis((0, vthread_n), "vthread", name="vy")
    # Schedule for output: split batch (n) and channel (f) axes across
    # blocks, vthreads, and threads; fused spatial axes go to blockIdx.z.
    ni, hi, wi, fi = s[output].op.axis
    bz = s[output].fuse(hi, wi)
    tx, fi = s[output].split(fi, factor=tile_c)
    txz, tx = s[output].split(tx, factor=num_thread_c)
    bx, txz = s[output].split(txz, factor=vthread_c)
    ty, ni = s[output].split(ni, factor=tile_n)
    tyz, ty = s[output].split(ty, factor=num_thread_n)
    by, tyz = s[output].split(tyz, factor=vthread_n)
    s[output].reorder(bz, by, bx, tyz, txz, ty, tx, ni, fi)
    s[output].bind(bz, block_z)
    s[output].bind(by, block_y)
    s[output].bind(bx, block_x)
    s[output].bind(tyz, thread_yz)
    s[output].bind(txz, thread_xz)
    s[output].bind(ty, thread_y)
    s[output].bind(tx, thread_x)
    # Schedule local computation: tile the reduction channel axis by `step`
    # so shared-memory loads are staged per rci chunk.
    s[OL].compute_at(s[output], tx)
    ni, yi, xi, fi = s[OL].op.axis
    ry, rx, rc = s[OL].op.reduce_axis
    rco, rci = s[OL].split(rc, factor=step)
    s[OL].reorder(rco, ry, rx, rci, ni, fi)
    s[AA].compute_at(s[OL], rx)
    s[WW].compute_at(s[OL], rx)
    s[AL].compute_at(s[OL], rci)
    s[WL].compute_at(s[OL], rci)
    # Schedule for data's share memory: cooperative load by the thread block
    ni, yi, xi, ci = s[AA].op.axis
    s[AA].reorder(yi, xi, ni, ci)
    s[AA].storage_align(xi, A_align - 1, A_align)
    t = s[AA].fuse(ni, ci)
    ty, tx = s[AA].split(t, factor=num_thread_c)
    _, ty = s[AA].split(ty, factor=num_thread_n)
    s[AA].bind(tx, thread_x)
    s[AA].bind(ty, thread_y)
    # Schedule for kernel's share memory: same cooperative load pattern
    _, _, ic, o = s[WW].op.axis
    t = s[WW].fuse(ic, o)
    s[WW].storage_align(ic, W_align - 1, W_align)
    ty, tx = s[WW].split(t, factor=num_thread_c)
    _, ty = s[WW].split(ty, factor=num_thread_n)
    s[WW].bind(tx, thread_x)
    s[WW].bind(ty, thread_y)
    # Report FLOPs to autotvm (only possible for static batch sizes).
    N, OH, OW, CO = get_const_tuple(output.shape)
    KH, KW, CI, _ = get_const_tuple(kernel.shape)
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
| apache-2.0 |
fu3kingt3pe/spiderfoot | ext/stem/control.py | 10 | 128760 | # Copyright 2011-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Module for interacting with the Tor control socket. The
:class:`~stem.control.Controller` is a wrapper around a
:class:`~stem.socket.ControlSocket`, retaining many of its methods (connect,
close, is_alive, etc) in addition to providing its own for working with the
socket at a higher level.
Stem has `several ways <../faq.html#how-do-i-connect-to-tor>`_ of getting a
:class:`~stem.control.Controller`, but the most flexible are
:func:`~stem.control.Controller.from_port` and
:func:`~stem.control.Controller.from_socket_file`. These static
:class:`~stem.control.Controller` methods give you an **unauthenticated**
Controller you can then authenticate yourself using its
:func:`~stem.control.Controller.authenticate` method. For example...
::
import getpass
import sys
import stem
import stem.connection
from stem.control import Controller
if __name__ == '__main__':
try:
controller = Controller.from_port()
except stem.SocketError as exc:
print("Unable to connect to tor on port 9051: %s" % exc)
sys.exit(1)
try:
controller.authenticate()
except stem.connection.MissingPassword:
pw = getpass.getpass("Controller password: ")
try:
controller.authenticate(password = pw)
except stem.connection.PasswordAuthFailed:
print("Unable to authenticate, password is incorrect")
sys.exit(1)
except stem.connection.AuthenticationFailure as exc:
print("Unable to authenticate: %s" % exc)
sys.exit(1)
print("Tor is running version %s" % controller.get_version())
controller.close()
If you're fine with allowing your script to raise exceptions then this can be more nicely done as...
::
from stem.control import Controller
if __name__ == '__main__':
with Controller.from_port() as controller:
controller.authenticate()
print("Tor is running version %s" % controller.get_version())
**Module Overview:**
::
Controller - General controller class intended for direct use
| |- from_port - Provides a Controller based on a port connection.
| +- from_socket_file - Provides a Controller based on a socket file connection.
|
|- authenticate - authenticates this controller with tor
|
|- get_info - issues a GETINFO query for a parameter
|- get_version - provides our tor version
|- get_exit_policy - provides our exit policy
|- get_ports - provides the local ports where tor is listening for connections
|- get_listeners - provides the addresses and ports where tor is listening for connections
|- get_accounting_stats - provides stats related to relaying limits
|- get_protocolinfo - information about the controller interface
|- get_user - provides the user tor is running as
|- get_pid - provides the pid of our tor process
|
|- get_microdescriptor - querying the microdescriptor for a relay
|- get_microdescriptors - provides all currently available microdescriptors
|- get_server_descriptor - querying the server descriptor for a relay
|- get_server_descriptors - provides all currently available server descriptors
|- get_network_status - querying the router status entry for a relay
 |- get_network_statuses - provides all presently available router status entries
|- get_hidden_service_descriptor - queries the given hidden service descriptor
|
|- get_conf - gets the value of a configuration option
|- get_conf_map - gets the values of multiple configuration options
|- set_conf - sets the value of a configuration option
|- reset_conf - reverts configuration options to their default values
|- set_options - sets or resets the values of multiple configuration options
|
|- get_hidden_service_conf - provides our hidden service configuration
|- set_hidden_service_conf - sets our hidden service configuration
|- create_hidden_service - creates a new hidden service or adds a new port
|- remove_hidden_service - removes a hidden service or drops a port
|
 |- list_ephemeral_hidden_services - list ephemeral hidden services
|- create_ephemeral_hidden_service - create a new ephemeral hidden service
|- remove_ephemeral_hidden_service - removes an ephemeral hidden service
|
|- add_event_listener - attaches an event listener to be notified of tor events
|- remove_event_listener - removes a listener so it isn't notified of further events
|
|- is_caching_enabled - true if the controller has enabled caching
|- set_caching - enables or disables caching
|- clear_cache - clears any cached results
|
|- load_conf - loads configuration information as if it was in the torrc
|- save_conf - saves configuration information to the torrc
|
|- is_feature_enabled - checks if a given controller feature is enabled
|- enable_feature - enables a controller feature that has been disabled by default
|
|- get_circuit - provides an active circuit
|- get_circuits - provides a list of active circuits
|- new_circuit - create new circuits
|- extend_circuit - create new circuits and extend existing ones
|- repurpose_circuit - change a circuit's purpose
|- close_circuit - close a circuit
|
|- get_streams - provides a list of active streams
|- attach_stream - attach a stream to a circuit
|- close_stream - close a stream
|
|- signal - sends a signal to the tor client
|- is_newnym_available - true if tor would currently accept a NEWNYM signal
|- get_newnym_wait - seconds until tor would accept a NEWNYM signal
|- get_effective_rate - provides our effective relaying rate limit
|- is_geoip_unavailable - true if we've discovered our geoip db to be unavailable
|- map_address - maps one address to another such that connections to the original are replaced with the other
+- drop_guards - drops our set of guard relays and picks a new set
BaseController - Base controller class asynchronous message handling
|- msg - communicates with the tor process
|- is_alive - reports if our connection to tor is open or closed
|- is_localhost - returns if the connection is for the local system or not
|- connection_time - time when we last connected or disconnected
|- is_authenticated - checks if we're authenticated to tor
|- connect - connects or reconnects to tor
|- close - shuts down our connection to the tor process
|- get_socket - provides the socket used for control communication
|- get_latest_heartbeat - timestamp for when we last heard from tor
|- add_status_listener - notifies a callback of changes in our status
|- remove_status_listener - prevents further notification of status changes
+- __enter__ / __exit__ - manages socket connection
.. data:: State (enum)
Enumeration for states that a controller can have.
========== ===========
State Description
========== ===========
**INIT** new control connection
**RESET** received a reset/sighup signal
**CLOSED** control connection closed
========== ===========
.. data:: EventType (enum)
Known types of events that the
:func:`~stem.control.Controller.add_event_listener` method of the
:class:`~stem.control.Controller` can listen for.
The most frequently listened for event types tend to be the logging events
(**DEBUG**, **INFO**, **NOTICE**, **WARN**, and **ERR**), bandwidth usage
(**BW**), and circuit or stream changes (**CIRC** and **STREAM**).
Enums are mapped to :class:`~stem.response.events.Event` subclasses as
follows...
======================= ===========
EventType Event Class
======================= ===========
**ADDRMAP** :class:`stem.response.events.AddrMapEvent`
**AUTHDIR_NEWDESCS** :class:`stem.response.events.AuthDirNewDescEvent`
**BUILDTIMEOUT_SET** :class:`stem.response.events.BuildTimeoutSetEvent`
**BW** :class:`stem.response.events.BandwidthEvent`
**CELL_STATS** :class:`stem.response.events.CellStatsEvent`
**CIRC** :class:`stem.response.events.CircuitEvent`
**CIRC_BW** :class:`stem.response.events.CircuitBandwidthEvent`
**CIRC_MINOR** :class:`stem.response.events.CircMinorEvent`
**CLIENTS_SEEN** :class:`stem.response.events.ClientsSeenEvent`
**CONF_CHANGED** :class:`stem.response.events.ConfChangedEvent`
**CONN_BW** :class:`stem.response.events.ConnectionBandwidthEvent`
**DEBUG** :class:`stem.response.events.LogEvent`
**DESCCHANGED** :class:`stem.response.events.DescChangedEvent`
**ERR** :class:`stem.response.events.LogEvent`
**GUARD** :class:`stem.response.events.GuardEvent`
**HS_DESC** :class:`stem.response.events.HSDescEvent`
**HS_DESC_CONTENT** :class:`stem.response.events.HSDescContentEvent`
**INFO** :class:`stem.response.events.LogEvent`
**NEWCONSENSUS** :class:`stem.response.events.NewConsensusEvent`
**NEWDESC** :class:`stem.response.events.NewDescEvent`
**NOTICE** :class:`stem.response.events.LogEvent`
**NS** :class:`stem.response.events.NetworkStatusEvent`
**ORCONN** :class:`stem.response.events.ORConnEvent`
**SIGNAL** :class:`stem.response.events.SignalEvent`
**STATUS_CLIENT** :class:`stem.response.events.StatusEvent`
**STATUS_GENERAL** :class:`stem.response.events.StatusEvent`
**STATUS_SERVER** :class:`stem.response.events.StatusEvent`
**STREAM** :class:`stem.response.events.StreamEvent`
**STREAM_BW** :class:`stem.response.events.StreamBwEvent`
**TB_EMPTY** :class:`stem.response.events.TokenBucketEmptyEvent`
**TRANSPORT_LAUNCHED** :class:`stem.response.events.TransportLaunchedEvent`
**WARN** :class:`stem.response.events.LogEvent`
======================= ===========
.. data:: Listener (enum)
Purposes for inbound connections that Tor handles.
============= ===========
Listener Description
============= ===========
**OR** traffic we're relaying as a member of the network (torrc's **ORPort** and **ORListenAddress**)
**DIR** mirroring for tor descriptor content (torrc's **DirPort** and **DirListenAddress**)
**SOCKS** client traffic we're sending over Tor (torrc's **SocksPort** and **SocksListenAddress**)
**TRANS** transparent proxy handling (torrc's **TransPort** and **TransListenAddress**)
**NATD** forwarding for ipfw NATD connections (torrc's **NatdPort** and **NatdListenAddress**)
**DNS** DNS lookups for our traffic (torrc's **DNSPort** and **DNSListenAddress**)
**CONTROL** controller applications (torrc's **ControlPort** and **ControlListenAddress**)
============= ===========
"""
import calendar
import collections
import functools
import inspect
import io
import os
import threading
import time
try:
# Added in 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
try:
import queue
from io import StringIO
except ImportError:
import Queue as queue
from StringIO import StringIO
import stem.descriptor.microdescriptor
import stem.descriptor.reader
import stem.descriptor.router_status_entry
import stem.descriptor.server_descriptor
import stem.exit_policy
import stem.response
import stem.response.events
import stem.socket
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
import stem.util.system
import stem.util.tor_tools
import stem.version
from stem import UNDEFINED, CircStatus, Signal, str_type
from stem.util import log
# state changes a control socket can have
State = stem.util.enum.Enum('INIT', 'RESET', 'CLOSED')
# tor events that a controller can subscribe to (see the module docstring
# for the mapping to stem.response.events classes)
EventType = stem.util.enum.UppercaseEnum(
  'ADDRMAP',
  'AUTHDIR_NEWDESCS',
  'BUILDTIMEOUT_SET',
  'BW',
  'CELL_STATS',
  'CIRC',
  'CIRC_BW',
  'CIRC_MINOR',
  'CONF_CHANGED',
  'CONN_BW',
  'CLIENTS_SEEN',
  'DEBUG',
  'DESCCHANGED',
  'ERR',
  'GUARD',
  'HS_DESC',
  'HS_DESC_CONTENT',
  'INFO',
  'NEWCONSENSUS',
  'NEWDESC',
  'NOTICE',
  'NS',
  'ORCONN',
  'SIGNAL',
  'STATUS_CLIENT',
  'STATUS_GENERAL',
  'STATUS_SERVER',
  'STREAM',
  'STREAM_BW',
  'TB_EMPTY',
  'TRANSPORT_LAUNCHED',
  'WARN',
)
# purposes for inbound connections that tor handles
Listener = stem.util.enum.UppercaseEnum(
  'OR',
  'DIR',
  'SOCKS',
  'TRANS',
  'NATD',
  'DNS',
  'CONTROL',
)
# Configuration options that are fetched by a special key. The keys are
# lowercase to make case insensitive lookups easier.
MAPPED_CONFIG_KEYS = {
  'hiddenservicedir': 'HiddenServiceOptions',
  'hiddenserviceport': 'HiddenServiceOptions',
  'hiddenserviceversion': 'HiddenServiceOptions',
  'hiddenserviceauthorizeclient': 'HiddenServiceOptions',
  'hiddenserviceoptions': 'HiddenServiceOptions',
}
# unchangeable GETINFO parameters
CACHEABLE_GETINFO_PARAMS = (
  'version',
  'config-file',
  'exit-policy/default',
  'fingerprint',
  'config/names',
  'config/defaults',
  'info/names',
  'events/names',
  'features/names',
  'process/descriptor-limit',
)
# GETCONF parameters we shouldn't cache. This includes hidden service
# parameters due to the funky way they're set and retrieved (for instance,
# 'SETCONF HiddenServiceDir' affects 'GETCONF HiddenServiceOptions').
UNCACHEABLE_GETCONF_PARAMS = (
  'hiddenserviceoptions',
  'hiddenservicedir',
  'hiddenserviceport',
  'hiddenserviceversion',
  'hiddenserviceauthorizeclient',
)
# number of sequential attempts before we decide that the Tor geoip database
# is unavailable
GEOIP_FAILURE_THRESHOLD = 5
# message surfaced when server descriptors can't be fetched
SERVER_DESCRIPTORS_UNSUPPORTED = "Tor is currently not configured to retrieve \
server descriptors. As of Tor version 0.2.3.25 it downloads microdescriptors \
instead unless you set 'UseMicrodescriptors 0' in your torrc."
# result of Controller.get_accounting_stats()
AccountingStats = collections.namedtuple('AccountingStats', [
  'retrieved',
  'status',
  'interval_end',
  'time_until_reset',
  'read_bytes',
  'read_bytes_left',
  'read_limit',
  'written_bytes',
  'write_bytes_left',
  'write_limit',
])
# result of Controller.create_hidden_service()
CreateHiddenServiceOutput = collections.namedtuple('CreateHiddenServiceOutput', [
  'path',
  'hostname',
  'hostname_for_client',
  'config',
])
def with_default(yields = False):
  """
  Provides a decorator to support having a default value. This should be
  treated as private.

  Decorated methods are expected to take a 'default' argument. If the wrapped
  call raises and the caller supplied a default then that value is returned
  (or iterated over, when **yields** is **True**) instead of propagating the
  exception.

  :param bool yields: decorates a generator function if **True**

  :returns: decorator applying this fallback behavior
  """

  def decorator(func):
    def get_default(func, args, kwargs):
      # Determine the value the caller passed for 'default', whether it was
      # given positionally or as a keyword. UNDEFINED means it wasn't given.
      #
      # inspect.getargspec() was deprecated in Python 3.0 and removed in
      # 3.11, so prefer getfullargspec() when it's available (the older
      # function is kept as a fallback for Python 2).

      if hasattr(inspect, 'getfullargspec'):
        arg_names = inspect.getfullargspec(func).args[1:]  # drop 'self'
      else:
        arg_names = inspect.getargspec(func).args[1:]  # drop 'self'

      default_position = arg_names.index('default') if 'default' in arg_names else None

      if default_position is not None and default_position < len(args):
        return args[default_position]
      else:
        return kwargs.get('default', UNDEFINED)

    if not yields:
      @functools.wraps(func)
      def wrapped(self, *args, **kwargs):
        try:
          return func(self, *args, **kwargs)
        except Exception as exc:
          default = get_default(func, args, kwargs)

          if default == UNDEFINED:
            raise exc
          else:
            return default
    else:
      @functools.wraps(func)
      def wrapped(self, *args, **kwargs):
        try:
          for val in func(self, *args, **kwargs):
            yield val
        except Exception as exc:
          default = get_default(func, args, kwargs)

          if default == UNDEFINED:
            raise exc
          else:
            if default is not None:
              for val in default:
                yield val

    return wrapped

  return decorator
class BaseController(object):
"""
Controller for the tor process. This is a minimal base class for other
controllers, providing basic process communication and event listing. Don't
use this directly - subclasses like the :class:`~stem.control.Controller`
provide higher level functionality.
It's highly suggested that you don't interact directly with the
:class:`~stem.socket.ControlSocket` that we're constructed from - use our
wrapper methods instead.
If the **control_socket** is already authenticated to Tor then the caller
should provide the **is_authenticated** flag. Otherwise, we will treat the
socket as though it hasn't yet been authenticated.
"""
  def __init__(self, control_socket, is_authenticated = False):
    """
    Wraps the given control socket, taking over its _connect()/_close() hooks
    so we can manage our worker threads and notify status listeners.

    :param stem.socket.ControlSocket control_socket: socket to communicate over
    :param bool is_authenticated: if **True** the socket is treated as already
      authenticated and post-authentication setup runs immediately
    """

    self._socket = control_socket
    self._msg_lock = threading.RLock()
    self._status_listeners = []  # tuples of the form (callback, spawn_thread)
    self._status_listeners_lock = threading.RLock()
    # queues where incoming messages are directed
    self._reply_queue = queue.Queue()
    self._event_queue = queue.Queue()
    # thread to continually pull from the control socket
    self._reader_thread = None
    # thread to pull from the _event_queue and call handle_event
    self._event_notice = threading.Event()
    self._event_thread = None
    # saves our socket's prior _connect() and _close() methods so they can be
    # called along with ours
    self._socket_connect = self._socket._connect
    self._socket_close = self._socket._close
    self._socket._connect = self._connect
    self._socket._close = self._close
    self._last_heartbeat = 0.0  # timestamp for when we last heard from tor
    self._is_authenticated = False
    self._state_change_threads = []  # threads we've spawned to notify of state changes
    if self._socket.is_alive():
      self._launch_threads()
    if is_authenticated:
      self._post_authentication()
  def msg(self, message):
    """
    Sends a message to our control socket and provides back its reply.

    :param str message: message to be formatted and sent to tor

    :returns: :class:`~stem.response.ControlMessage` with the response

    :raises:
      * :class:`stem.ProtocolError` the content from the socket is
        malformed
      * :class:`stem.SocketError` if a problem arises in using the
        socket
      * :class:`stem.SocketClosed` if the socket is shut down
    """

    with self._msg_lock:
      # If our _reply_queue isn't empty then one of a few things happened...
      #
      # - Our connection was closed and probably re-established. This was
      #   in reply to pulling for an asynchronous event and getting this is
      #   expected - ignore it.
      #
      # - Pulling for asynchronous events produced an error. If this was a
      #   ProtocolError then it's a tor bug, and if a non-closure SocketError
      #   then it was probably a socket glitch. Deserves an INFO level log
      #   message.
      #
      # - This is a leftover response for a msg() call. We can't tell who an
      #   exception was earmarked for, so we only know that this was the case
      #   if it's a ControlMessage.
      #
      #   This is the most concerning situation since it indicates that one of
      #   our callers didn't get their reply. However, this is still a
      #   perfectly viable use case. For instance...
      #
      #   1. We send a request.
      #   2. The reader thread encounters an exception, for instance a socket
      #      error. We enqueue the exception.
      #   3. The reader thread receives the reply.
      #   4. We raise the socket error, and have an undelivered message.
      #
      #   Thankfully this only seems to arise in edge cases around rapidly
      #   closing/reconnecting the socket.

      while not self._reply_queue.empty():
        try:
          response = self._reply_queue.get_nowait()

          if isinstance(response, stem.SocketClosed):
            pass  # this is fine
          elif isinstance(response, stem.ProtocolError):
            log.info('Tor provided a malformed message (%s)' % response)
          elif isinstance(response, stem.ControllerError):
            log.info('Socket experienced a problem (%s)' % response)
          elif isinstance(response, stem.response.ControlMessage):
            log.info('Failed to deliver a response: %s' % response)
        except queue.Empty:
          # the empty() method is documented to not be fully reliable so this
          # isn't entirely surprising
          break

      try:
        self._socket.send(message)
        response = self._reply_queue.get()

        # If the message we received back had an exception then re-raise it to
        # the caller. Otherwise return the response.

        if isinstance(response, stem.ControllerError):
          raise response
        else:
          # I really, really don't like putting hooks into this method, but
          # this is the most reliable method I can think of for taking actions
          # immediately after successfully authenticating to a connection.

          if message.upper().startswith('AUTHENTICATE'):
            self._post_authentication()

          return response
      except stem.SocketClosed as exc:
        # If the recv() thread caused the SocketClosed then we could still be
        # in the process of closing. Calling close() here so that we can
        # provide an assurance to the caller that when we raise a SocketClosed
        # exception we are shut down afterward for realz.

        self.close()
        raise exc
def is_alive(self):
"""
Checks if our socket is currently connected. This is a pass-through for our
socket's :func:`~stem.socket.ControlSocket.is_alive` method.
:returns: **bool** that's **True** if our socket is connected and **False** otherwise
"""
return self._socket.is_alive()
def is_localhost(self):
"""
Returns if the connection is for the local system or not.
.. versionadded:: 1.3.0
:returns: **bool** that's **True** if the connection is for the local host and **False** otherwise
"""
return self._socket.is_localhost()
def connection_time(self):
"""
Provides the unix timestamp for when our socket was either connected or
disconnected. That is to say, the time we connected if we're currently
connected and the time we disconnected if we're not connected.
.. versionadded:: 1.3.0
:returns: **float** for when we last connected or disconnected, zero if
we've never connected
"""
return self._socket.connection_time()
def is_authenticated(self):
"""
Checks if our socket is both connected and authenticated.
:returns: **bool** that's **True** if our socket is authenticated to tor
and **False** otherwise
"""
if self.is_alive():
return self._is_authenticated
return False
def connect(self):
"""
Reconnects our control socket. This is a pass-through for our socket's
:func:`~stem.socket.ControlSocket.connect` method.
:raises: :class:`stem.SocketError` if unable to make a socket
"""
self._socket.connect()
def close(self):
"""
Closes our socket connection. This is a pass-through for our socket's
:func:`~stem.socket.ControlSocket.close` method.
"""
self._socket.close()
# Join on any outstanding state change listeners. Closing is a state change
# of its own, so if we have any listeners it's quite likely there's some
# work in progress.
#
# It's important that we do this outside of our locks so those daemons have
# access to us. This is why we're doing this here rather than _close().
for t in self._state_change_threads:
if t.is_alive() and threading.current_thread() != t:
t.join()
def get_socket(self):
"""
Provides the socket used to speak with the tor process. Communicating with
the socket directly isn't advised since it may confuse this controller.
:returns: :class:`~stem.socket.ControlSocket` we're communicating with
"""
return self._socket
def get_latest_heartbeat(self):
"""
Provides the unix timestamp for when we last heard from tor. This is zero
if we've never received a message.
:returns: float for the unix timestamp of when we last heard from tor
"""
return self._last_heartbeat
def add_status_listener(self, callback, spawn = True):
"""
Notifies a given function when the state of our socket changes. Functions
are expected to be of the form...
::
my_function(controller, state, timestamp)
The state is a value from the :data:`stem.control.State` enum. Functions
**must** allow for new values. The timestamp is a float for the unix time
when the change occurred.
This class only provides **State.INIT** and **State.CLOSED** notifications.
Subclasses may provide others.
If spawn is **True** then the callback is notified via a new daemon thread.
If **False** then the notice is under our locks, within the thread where
the change occurred. In general this isn't advised, especially if your
callback could block for a while. If still outstanding these threads are
joined on as part of closing this controller.
:param function callback: function to be notified when our state changes
:param bool spawn: calls function via a new thread if **True**, otherwise
it's part of the connect/close method call
"""
with self._status_listeners_lock:
self._status_listeners.append((callback, spawn))
def remove_status_listener(self, callback):
"""
Stops listener from being notified of further events.
:param function callback: function to be removed from our listeners
:returns: **bool** that's **True** if we removed one or more occurrences of
the callback, **False** otherwise
"""
with self._status_listeners_lock:
new_listeners, is_changed = [], False
for listener, spawn in self._status_listeners:
if listener != callback:
new_listeners.append((listener, spawn))
else:
is_changed = True
self._status_listeners = new_listeners
return is_changed
def __enter__(self):
  # Context manager entry so callers can write 'with Controller(...) as c:'.
  return self
def __exit__(self, exit_type, value, traceback):
  # Context manager exit: close our socket regardless of whether the 'with'
  # block raised. Returns None, so exceptions propagate to the caller.
  self.close()
def _handle_event(self, event_message):
  """
  Callback to be overwritten by subclasses for event listening. This is
  notified whenever we receive an event from the control socket.

  :param stem.response.ControlMessage event_message: message received from
    the control socket
  """

  # Intentionally a no-op in the base class; _event_loop() calls this for
  # every asynchronous (650) message it dequeues.
  pass
def _connect(self):
  """
  Shared connection handler: starts our reader and event daemon threads,
  tells status listeners that we're starting up, then delegates to
  _socket_connect() for the socket-type specific work.
  """

  self._launch_threads()
  self._notify_status_listeners(State.INIT)
  self._socket_connect()
  # fresh connections always start unauthenticated
  self._is_authenticated = False
def _close(self):
  """
  Shared close handler: wakes and joins our worker threads, notifies status
  listeners that we're closed, then delegates to _socket_close().
  """

  # Our is_alive() state is now false. Our reader thread should already be
  # awake from recv() raising a closure exception. Wake up the event thread
  # too so it can end.

  self._event_notice.set()
  self._is_authenticated = False

  # joins on our threads if it's safe to do so
  # (joining the current thread would deadlock, hence the identity check)

  for t in (self._reader_thread, self._event_thread):
    if t and t.is_alive() and threading.current_thread() != t:
      t.join()

  self._notify_status_listeners(State.CLOSED)
  self._socket_close()
def _post_authentication(self):
  # actions to be taken after we have a newly authenticated connection
  # (subclasses extend this; the base class only flips our auth flag)
  self._is_authenticated = True
def _notify_status_listeners(self, state):
  """
  Informs our status listeners that a state change occurred.

  :param stem.control.State state: state change that has occurred
  """

  # Any changes to our is_alive() state happen under the send lock, so we
  # need to have it to ensure it doesn't change beneath us.

  with self._socket._get_send_lock():
    with self._status_listeners_lock:
      # States imply that our socket is either alive or not, which may not
      # hold true when multiple events occur in quick succession. For
      # instance, a sighup could cause two events (State.RESET for the sighup
      # and State.CLOSE if it causes tor to crash). However, there's no
      # guarantee of the order in which they occur, and it would be bad if
      # listeners got the State.RESET last, implying that we were alive.

      expect_alive = None

      if state in (State.INIT, State.RESET):
        expect_alive = True
      elif state == State.CLOSED:
        expect_alive = False

      change_timestamp = time.time()

      # drop the notification if our actual liveness contradicts the state

      if expect_alive is not None and expect_alive != self.is_alive():
        return

      # prune notification threads that have already finished

      self._state_change_threads = list(filter(lambda t: t.is_alive(), self._state_change_threads))

      for listener, spawn in self._status_listeners:
        if spawn:
          name = '%s notification' % state
          args = (self, state, change_timestamp)

          notice_thread = threading.Thread(target = listener, args = args, name = name)
          notice_thread.setDaemon(True)
          notice_thread.start()
          self._state_change_threads.append(notice_thread)
        else:
          # synchronous notification, runs under both locks above
          listener(self, state, change_timestamp)
def _launch_threads(self):
  """
  Initializes daemon threads. Threads can't be reused so we need to recreate
  them if we're restarted.
  """

  # In theory concurrent calls could result in multiple start() calls on a
  # single thread, which would cause an unexpected exception. Best be safe.

  with self._socket._get_send_lock():
    # only (re)create a thread if it's missing or has already terminated

    if not self._reader_thread or not self._reader_thread.is_alive():
      self._reader_thread = threading.Thread(target = self._reader_loop, name = 'Tor Listener')
      self._reader_thread.setDaemon(True)
      self._reader_thread.start()

    if not self._event_thread or not self._event_thread.is_alive():
      self._event_thread = threading.Thread(target = self._event_loop, name = 'Event Notifier')
      self._event_thread.setDaemon(True)
      self._event_thread.start()
def _reader_loop(self):
  """
  Continually pulls from the control socket, directing the messages into
  queues based on their type. Controller messages come in two varieties...

  * Responses to messages we've sent (GETINFO, SETCONF, etc).
  * Asynchronous events, identified by a status code of 650.
  """

  while self.is_alive():
    try:
      control_message = self._socket.recv()
      self._last_heartbeat = time.time()  # any traffic counts as a heartbeat

      # the status code of the last line tells us if this is an async event

      if control_message.content()[-1][0] == '650':
        # asynchronous message, adds to the event queue and wakes up its handler
        self._event_queue.put(control_message)
        self._event_notice.set()
      else:
        # response to a msg() call
        self._reply_queue.put(control_message)
    except stem.ControllerError as exc:
      # Assume that all exceptions belong to the reader. This isn't always
      # true, but the msg() call can do a better job of sorting it out.
      #
      # Be aware that the msg() method relies on this to unblock callers.

      self._reply_queue.put(exc)
def _event_loop(self):
  """
  Continually pulls messages from the _event_queue and sends them to our
  handle_event callback. This is done via its own thread so subclasses with a
  lengthy handle_event implementation don't block further reading from the
  socket.
  """

  while True:
    try:
      event_message = self._event_queue.get_nowait()
      self._handle_event(event_message)
    except queue.Empty:
      # queue drained; exit once the controller dies, otherwise sleep until
      # the reader thread signals that another event arrived

      if not self.is_alive():
        break

      self._event_notice.wait()
      self._event_notice.clear()
class Controller(BaseController):
  """
  Communicates with a control socket. This is built on top of the
  BaseController and provides a more user friendly API for library users,
  including response caching and convenience getters (GETINFO, GETCONF, etc).
  """
@staticmethod
def from_port(address = '127.0.0.1', port = 9051):
  """
  Constructs a :class:`~stem.socket.ControlPort` based Controller.

  :param str address: ip address of the controller
  :param int port: port number of the controller

  :returns: :class:`~stem.control.Controller` attached to the given port

  :raises: :class:`stem.SocketError` if we're unable to establish a connection
  """

  # validate both endpoint components before touching the network

  if not stem.util.connection.is_valid_ipv4_address(address):
    raise ValueError('Invalid IP address: %s' % address)
  elif not stem.util.connection.is_valid_port(port):
    raise ValueError('Invalid port: %s' % port)

  return Controller(stem.socket.ControlPort(address, port))
@staticmethod
def from_socket_file(path = '/var/run/tor/control'):
  """
  Constructs a :class:`~stem.socket.ControlSocketFile` based Controller.

  :param str path: path where the control socket is located

  :returns: :class:`~stem.control.Controller` attached to the given socket file

  :raises: :class:`stem.SocketError` if we're unable to establish a connection
  """

  return Controller(stem.socket.ControlSocketFile(path))
def __init__(self, control_socket, is_authenticated = False):
  """
  Creates a Controller wrapping the given control socket, setting up our
  response cache and registering listeners that keep it coherent.

  :param stem.socket.ControlSocket control_socket: socket to wrap
  :param bool is_authenticated: if True we skip the authentication handshake
  """

  # response caching state

  self._is_caching_enabled = True
  self._request_cache = {}
  self._last_newnym = 0.0
  self._cache_lock = threading.RLock()

  # mapping of event types to their listeners

  self._event_listeners = {}
  self._event_listeners_lock = threading.RLock()

  # number of sequential 'GETINFO ip-to-country/*' lookups that have failed

  self._geoip_failure_count = 0
  self._enabled_features = []

  super(Controller, self).__init__(control_socket, is_authenticated)

  def _sighup_listener(event):
    # a RELOAD (sighup) can change anything, so drop all cached responses
    if event.signal == Signal.RELOAD:
      self.clear_cache()
      self._notify_status_listeners(State.RESET)

  self.add_event_listener(_sighup_listener, EventType.SIGNAL)

  def _confchanged_listener(event):
    if self.is_caching_enabled():
      self._set_cache(dict((k, None) for k in event.config), 'getconf')

      if 'exitpolicy' in event.config.keys():
        # BUGFIX: get_exit_policy() caches under the 'exit_policy' key, so
        # invalidating 'exitpolicy' (no underscore) left a stale policy.
        self._set_cache({'exit_policy': None})

  self.add_event_listener(_confchanged_listener, EventType.CONF_CHANGED)
def connect(self):
  """
  Reconnects our control socket, dropping cached responses since they may
  not reflect the instance we're now attached to.
  """

  super(Controller, self).connect()
  self.clear_cache()
def close(self):
  """
  Closes our socket connection, making a best-effort attempt to send a QUIT
  to tor first so it knows we're going away.
  """

  if self.is_alive():
    try:
      self.msg('QUIT')
    except Exception:
      # Best-effort only - the socket may already be unusable. Previously a
      # bare 'except' which also swallowed SystemExit/KeyboardInterrupt.
      pass

  self.clear_cache()
  super(Controller, self).close()
def authenticate(self, *args, **kwargs):
  """
  A convenience method to authenticate the controller. This is just a
  pass-through to :func:`stem.connection.authenticate`.
  """

  # imported here rather than at module level to avoid a circular import
  import stem.connection
  stem.connection.authenticate(self, *args, **kwargs)
@with_default()
def get_info(self, params, default = UNDEFINED, get_bytes = False):
  """
  get_info(params, default = UNDEFINED, get_bytes = False)

  Queries the control socket for the given GETINFO option. If provided a
  default then that's returned if the GETINFO option is undefined or the
  call fails for any reason (error response, control port closed, initiated,
  etc).

  .. versionchanged:: 1.1.0
     Added the get_bytes argument.

  :param str,list params: GETINFO option or options to be queried
  :param object default: response if the query fails
  :param bool get_bytes: provides **bytes** values rather than a **str** under python 3.x

  :returns:
    Response depends upon how we were called as follows...

    * **str** with the response if our param was a **str**
    * **dict** with the 'param => response' mapping if our param was a **list**
    * default if one was provided and our call failed

  :raises:
    * :class:`stem.ControllerError` if the call fails and we weren't
      provided a default response
    * :class:`stem.InvalidArguments` if the 'params' requested was
      invalid
    * :class:`stem.ProtocolError` if the geoip database is known to be
      unavailable
  """

  start_time = time.time()
  reply = {}

  # normalize to a set of params, remembering whether the caller gave us a
  # single option (str return) or several (dict return)

  if isinstance(params, (bytes, str_type)):
    is_multiple = False
    params = set([params])
  else:
    if not params:
      return {}

    is_multiple = True
    params = set(params)

  # check for cached results

  from_cache = [param.lower() for param in params]
  cached_results = self._get_cache_map(from_cache, 'getinfo')

  for key in cached_results:
    # cache keys are lowercased; map back to the caller's original casing
    user_expected_key = _case_insensitive_lookup(params, key)
    reply[user_expected_key] = cached_results[key]
    params.remove(user_expected_key)

  for param in params:
    if param.startswith('ip-to-country/') and self.is_geoip_unavailable():
      # the geoip database already looks to be unavailable - abort the request
      raise stem.ProtocolError('Tor geoip database is unavailable')

  # if everything was cached then short circuit making the query

  if not params:
    log.trace('GETINFO %s (cache fetch)' % ' '.join(reply.keys()))

    if is_multiple:
      return reply
    else:
      return list(reply.values())[0]

  try:
    response = self.msg('GETINFO %s' % ' '.join(params))
    stem.response.convert('GETINFO', response)
    response._assert_matches(params)

    # usually we want unicode values under python 3.x

    if stem.prereq.is_python_3() and not get_bytes:
      response.entries = dict((k, stem.util.str_tools._to_unicode(v)) for (k, v) in response.entries.items())

    reply.update(response.entries)

    if self.is_caching_enabled():
      to_cache = {}

      for key, value in response.entries.items():
        key = key.lower()  # make case insensitive

        if key in CACHEABLE_GETINFO_PARAMS:
          to_cache[key] = value
        elif key.startswith('ip-to-country/'):
          # both cache-able and means that we should reset the geoip failure count
          to_cache[key] = value
          self._geoip_failure_count = -1

      self._set_cache(to_cache, 'getinfo')

    log.debug('GETINFO %s (runtime: %0.4f)' % (' '.join(params), time.time() - start_time))

    if is_multiple:
      return reply
    else:
      return list(reply.values())[0]
  except stem.ControllerError as exc:
    # bump geoip failure count if...
    # * we're caching results
    # * this was solely a geoip lookup
    # * we've never had a successful geoip lookup (failure count isn't -1)

    is_geoip_request = len(params) == 1 and list(params)[0].startswith('ip-to-country/')

    if is_geoip_request and self.is_caching_enabled() and self._geoip_failure_count != -1:
      self._geoip_failure_count += 1

      if self.is_geoip_unavailable():
        log.warn("Tor's geoip database is unavailable.")

    log.debug('GETINFO %s (failed: %s)' % (' '.join(params), exc))

    raise exc
@with_default()
def get_version(self, default = UNDEFINED):
  """
  get_version(default = UNDEFINED)

  A convenience method to get tor version that current controller is
  connected to.

  :param object default: response if the query fails

  :returns: :class:`~stem.version.Version` of the tor instance that we're
    connected to

  :raises:
    * :class:`stem.ControllerError` if unable to query the version
    * **ValueError** if unable to parse the version

    An exception is only raised if we weren't provided a default response.
  """

  cached = self._get_cache('version')

  if cached:
    return cached

  version = stem.version.Version(self.get_info('version'))
  self._set_cache({'version': version})

  return version
@with_default()
def get_exit_policy(self, default = UNDEFINED):
  """
  get_exit_policy(default = UNDEFINED)

  Effective ExitPolicy for our relay. This accounts for
  ExitPolicyRejectPrivate and default policies.

  :param object default: response if the query fails

  :returns: :class:`~stem.exit_policy.ExitPolicy` of the tor instance that
    we're connected to

  :raises:
    * :class:`stem.ControllerError` if unable to query the policy
    * **ValueError** if unable to parse the policy

    An exception is only raised if we weren't provided a default response.
  """

  # done under the msg lock so the several queries below see one consistent
  # tor configuration

  with self._msg_lock:
    config_policy = self._get_cache('exit_policy')

    if not config_policy:
      policy = []

      if self.get_conf('ExitPolicyRejectPrivate') == '1':
        policy.append('reject private:*')

      for policy_line in self.get_conf('ExitPolicy', multiple = True):
        policy += policy_line.split(',')

      # tor appends its built-in default policy after the user's entries

      policy += self.get_info('exit-policy/default').split(',')

      config_policy = stem.exit_policy.get_config_policy(policy, self.get_info('address', None))
      self._set_cache({'exit_policy': config_policy})

    return config_policy
@with_default()
def get_ports(self, listener_type, default = UNDEFINED):
  """
  get_ports(listener_type, default = UNDEFINED)

  Provides the local ports where tor is listening for the given type of
  connections. This is similar to
  :func:`~stem.control.Controller.get_listeners`, but doesn't provide
  addresses nor include non-local endpoints.

  .. versionadded:: 1.2.0

  :param stem.control.Listener listener_type: connection type being handled
    by the ports we return
  :param object default: response if the query fails

  :returns: **list** of **ints** for the local ports where tor handles
    connections of the given type

  :raises: :class:`stem.ControllerError` if unable to determine the ports
    and no default was provided
  """

  local_ports = []

  for bind_address, bind_port in self.get_listeners(listener_type):
    if bind_address == '127.0.0.1':
      local_ports.append(bind_port)

  return local_ports
@with_default()
def get_listeners(self, listener_type, default = UNDEFINED):
  """
  get_listeners(listener_type, default = UNDEFINED)

  Provides the addresses and ports where tor is listening for connections of
  the given type. This is similar to
  :func:`~stem.control.Controller.get_ports` but includes listener addresses
  and non-local endpoints.

  .. versionadded:: 1.2.0

  :param stem.control.Listener listener_type: connection type being handled
    by the listeners we return
  :param object default: response if the query fails

  :returns: **list** of **(address, port)** tuples for the available
    listeners

  :raises: :class:`stem.ControllerError` if unable to determine the listeners
    and no default was provided
  """

  proxy_addrs = []
  query = 'net/listeners/%s' % listener_type.lower()

  try:
    # listeners come back as space-separated quoted "address:port" entries

    for listener in self.get_info(query).split():
      if not (listener.startswith('"') and listener.endswith('"')):
        raise stem.ProtocolError("'GETINFO %s' responses are expected to be quoted: %s" % (query, listener))
      elif ':' not in listener:
        raise stem.ProtocolError("'GETINFO %s' had a listener without a colon: %s" % (query, listener))

      listener = listener[1:-1]  # strip quotes
      addr, port = listener.split(':')

      # Skip unix sockets, for instance...
      #
      # GETINFO net/listeners/control
      # 250-net/listeners/control="unix:/tmp/tor/socket"
      # 250 OK

      if addr == 'unix':
        continue

      proxy_addrs.append((addr, port))
  except stem.InvalidArguments:
    # Tor version is old (pre-tor-0.2.2.26-beta), use get_conf() instead.
    # Some options (like the ORPort) can have optional attributes after the
    # actual port number.

    port_option = {
      Listener.OR: 'ORPort',
      Listener.DIR: 'DirPort',
      Listener.SOCKS: 'SocksPort',
      Listener.TRANS: 'TransPort',
      Listener.NATD: 'NatdPort',
      Listener.DNS: 'DNSPort',
      Listener.CONTROL: 'ControlPort',
    }[listener_type]

    listener_option = {
      Listener.OR: 'ORListenAddress',
      Listener.DIR: 'DirListenAddress',
      Listener.SOCKS: 'SocksListenAddress',
      Listener.TRANS: 'TransListenAddress',
      Listener.NATD: 'NatdListenAddress',
      Listener.DNS: 'DNSListenAddress',
      Listener.CONTROL: 'ControlListenAddress',
    }[listener_type]

    # first token only, since trailing attributes may follow the port

    port_value = self.get_conf(port_option).split()[0]

    for listener in self.get_conf(listener_option, multiple = True):
      if ':' in listener:
        addr, port = listener.split(':')
        proxy_addrs.append((addr, port))
      else:
        # bare address entries fall back to the configured port

        proxy_addrs.append((listener, port_value))

  # validate that address/ports are valid, and convert ports to ints

  for addr, port in proxy_addrs:
    if not stem.util.connection.is_valid_ipv4_address(addr):
      raise stem.ProtocolError('Invalid address for a %s listener: %s' % (listener_type, addr))
    elif not stem.util.connection.is_valid_port(port):
      raise stem.ProtocolError('Invalid port for a %s listener: %s' % (listener_type, port))

  return [(addr, int(port)) for (addr, port) in proxy_addrs]
@with_default()
def get_accounting_stats(self, default = UNDEFINED):
  """
  get_accounting_stats(default = UNDEFINED)

  Provides stats related to our relaying limitations if AccountingMax was set
  in our torrc. This provides a **namedtuple** with the following
  attributes...

  * retrieved (float) - unix timestamp for when this was fetched
  * status (str) - hibernation status of 'awake', 'soft', or 'hard'
  * interval_end (datetime)
  * time_until_reset (int) - seconds until our limits reset
  * read_bytes (int)
  * read_bytes_left (int)
  * read_limit (int)
  * written_bytes (int)
  * write_bytes_left (int)
  * write_limit (int)

  .. versionadded:: 1.3.0

  :param object default: response if the query fails

  :returns: **namedtuple** with our accounting stats

  :raises: :class:`stem.ControllerError` if unable to determine the listeners
    and no default was provided
  """

  if self.get_info('accounting/enabled') != '1':
    raise stem.ControllerError("Accounting isn't enabled")

  retrieved = time.time()
  status = self.get_info('accounting/hibernating')
  interval_end = self.get_info('accounting/interval-end')
  used = self.get_info('accounting/bytes')
  left = self.get_info('accounting/bytes-left')

  interval_end = stem.util.str_tools._parse_timestamp(interval_end)

  # 'accounting/bytes' and 'bytes-left' are space-separated 'read written' pairs

  used_read, used_written = [int(val) for val in used.split(' ', 1)]
  left_read, left_written = [int(val) for val in left.split(' ', 1)]

  return AccountingStats(
    retrieved = retrieved,
    status = status,
    interval_end = interval_end,
    time_until_reset = calendar.timegm(interval_end.timetuple()) - int(retrieved),
    read_bytes = used_read,
    read_bytes_left = left_read,
    read_limit = used_read + left_read,
    written_bytes = used_written,
    write_bytes_left = left_written,
    write_limit = used_written + left_written,
  )
def get_socks_listeners(self, default = UNDEFINED):
  """
  Provides the SOCKS **(address, port)** tuples that tor has open.

  .. deprecated:: 1.2.0
     Use :func:`~stem.control.Controller.get_listeners` with
     **Listener.SOCKS** instead.

  :param object default: response if the query fails

  :returns: list of **(address, port)** tuples for the available SOCKS
    listeners

  :raises: :class:`stem.ControllerError` if unable to determine the listeners
    and no default was provided
  """

  # deprecated pass-through kept for backward compatibility
  return self.get_listeners(Listener.SOCKS, default)
@with_default()
def get_protocolinfo(self, default = UNDEFINED):
  """
  get_protocolinfo(default = UNDEFINED)

  A convenience method to get the protocol info of the controller.

  :param object default: response if the query fails

  :returns: :class:`~stem.response.protocolinfo.ProtocolInfoResponse` provided by tor

  :raises:
    * :class:`stem.ProtocolError` if the PROTOCOLINFO response is
      malformed
    * :class:`stem.SocketError` if problems arise in establishing or
      using the socket

    An exception is only raised if we weren't provided a default response.
  """

  # imported here rather than at module level to avoid a circular import
  import stem.connection
  return stem.connection.get_protocolinfo(self)
@with_default()
def get_user(self, default = UNDEFINED):
  """
  get_user(default = UNDEFINED)

  Provides the user tor is running as. This often only works if tor is
  running locally. Also, most of its checks are platform dependent, and hence
  are not entirely reliable.

  .. versionadded:: 1.1.0

  :param object default: response if the query fails

  :returns: str with the username tor is running as
  """

  user = self._get_cache('user')

  if not user:
    user = self.get_info('process/user', None)

  if not user and self.is_localhost():
    # fall back to resolving the owner of tor's process

    our_pid = self.get_pid(None)

    if our_pid:
      user = stem.util.system.user(our_pid)

  if not user:
    raise ValueError("Unable to resolve tor's user" if self.is_localhost() else "Tor isn't running locally")

  self._set_cache({'user': user})
  return user
@with_default()
def get_pid(self, default = UNDEFINED):
  """
  get_pid(default = UNDEFINED)

  Provides the process id of tor. This often only works if tor is running
  locally. Also, most of its checks are platform dependent, and hence are not
  entirely reliable.

  .. versionadded:: 1.1.0

  :param object default: response if the query fails

  :returns: **int** for tor's pid

  :raises: **ValueError** if unable to determine the pid and no default was
    provided
  """

  pid = self._get_cache('pid')

  if not pid:
    getinfo_pid = self.get_info('process/pid', None)

    if getinfo_pid and getinfo_pid.isdigit():
      pid = int(getinfo_pid)

  # GETINFO failed; try several platform-dependent fallbacks, each only
  # meaningful when tor runs on this host

  if not pid and self.is_localhost():
    pid_file_path = self.get_conf('PidFile', None)

    if pid_file_path is not None:
      with open(pid_file_path) as pid_file:
        pid_file_contents = pid_file.read().strip()

        if pid_file_contents.isdigit():
          pid = int(pid_file_contents)

    if not pid:
      pid = stem.util.system.pid_by_name('tor')

    if not pid:
      # resolve via whatever endpoint our control connection uses

      control_socket = self.get_socket()

      if isinstance(control_socket, stem.socket.ControlPort):
        pid = stem.util.system.pid_by_port(control_socket.get_port())
      elif isinstance(control_socket, stem.socket.ControlSocketFile):
        pid = stem.util.system.pid_by_open_file(control_socket.get_socket_path())

  if pid:
    self._set_cache({'pid': pid})
    return pid
  else:
    raise ValueError("Unable to resolve tor's pid" if self.is_localhost() else "Tor isn't running locally")
@with_default()
def get_microdescriptor(self, relay = None, default = UNDEFINED):
  """
  get_microdescriptor(relay = None, default = UNDEFINED)

  Provides the microdescriptor for the relay with the given fingerprint or
  nickname. If the relay identifier could be either a fingerprint *or*
  nickname then it's queried as a fingerprint.

  If no **relay** is provided then this defaults to ourselves. Remember that
  this requires that we've retrieved our own descriptor from remote
  authorities so this both won't be available for newly started relays and
  may be up to around an hour out of date.

  .. versionchanged:: 1.3.0
     Changed so we'd fetch our own descriptor if no 'relay' is provided.

  :param str relay: fingerprint or nickname of the relay to be queried
  :param object default: response if the query fails

  :returns: :class:`~stem.descriptor.microdescriptor.Microdescriptor` for the given relay

  :raises:
    * :class:`stem.DescriptorUnavailable` if unable to provide a descriptor
      for the given relay
    * :class:`stem.ControllerError` if unable to query the descriptor
    * **ValueError** if **relay** doesn't conform with the pattern for being
      a fingerprint or nickname

    An exception is only raised if we weren't provided a default response.
  """

  if relay is None:
    try:
      relay = self.get_info('fingerprint')
    except stem.ControllerError as exc:
      raise stem.ControllerError('Unable to determine our own fingerprint: %s' % exc)

  # fingerprints win when the identifier could be either

  if stem.util.tor_tools.is_valid_fingerprint(relay):
    query = 'md/id/%s' % relay
  elif stem.util.tor_tools.is_valid_nickname(relay):
    query = 'md/name/%s' % relay
  else:
    raise ValueError("'%s' isn't a valid fingerprint or nickname" % relay)

  try:
    desc_content = self.get_info(query, get_bytes = True)
  except stem.InvalidArguments as exc:
    # translate tor's 'unrecognized keyword' refusal into a clearer error

    if str(exc).startswith('GETINFO request contained unrecognized keywords:'):
      raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay)
    else:
      raise exc

  if not desc_content:
    raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it')

  return stem.descriptor.microdescriptor.Microdescriptor(desc_content)
@with_default(yields = True)
def get_microdescriptors(self, default = UNDEFINED):
  """
  get_microdescriptors(default = UNDEFINED)

  Provides an iterator for all of the microdescriptors that tor currently
  knows about.

  **Tor does not expose this information via the control protocol**
  (:trac:`8323`). Until it does this reads the microdescriptors from disk,
  and hence won't work remotely or if we lack read permissions.

  :param list default: items to provide if the query fails

  :returns: iterates over
    :class:`~stem.descriptor.microdescriptor.Microdescriptor` for relays in
    the tor network

  :raises: :class:`stem.ControllerError` if unable to query tor and no
    default was provided
  """

  try:
    data_directory = self.get_conf('DataDirectory')
  except stem.ControllerError as exc:
    raise stem.OperationFailed(message = 'Unable to determine the data directory (%s)' % exc)

  cached_descriptor_path = os.path.join(data_directory, 'cached-microdescs')

  if not os.path.exists(data_directory):
    raise stem.OperationFailed(message = "Data directory reported by tor doesn't exist (%s)" % data_directory)
  elif not os.path.exists(cached_descriptor_path):
    # fixed typos in this user-facing error ("doens't", "microescriptors")
    raise stem.OperationFailed(message = "Data directory doesn't contain cached microdescriptors (%s)" % cached_descriptor_path)

  with stem.descriptor.reader.DescriptorReader([cached_descriptor_path]) as reader:
    for desc in reader:
      # It shouldn't be possible for these to be something other than
      # microdescriptors but as the saying goes: trust but verify.

      if not isinstance(desc, stem.descriptor.microdescriptor.Microdescriptor):
        raise stem.OperationFailed(message = 'BUG: Descriptor reader provided non-microdescriptor content (%s)' % type(desc))

      yield desc
@with_default()
def get_server_descriptor(self, relay = None, default = UNDEFINED):
  """
  get_server_descriptor(relay = None, default = UNDEFINED)

  Provides the server descriptor for the relay with the given fingerprint or
  nickname. If the relay identifier could be either a fingerprint *or*
  nickname then it's queried as a fingerprint.

  If no **relay** is provided then this defaults to ourselves. Remember that
  this requires that we've retrieved our own descriptor from remote
  authorities so this both won't be available for newly started relays and
  may be up to around an hour out of date.

  **As of Tor version 0.2.3.25 relays no longer get server descriptors by
  default.** It's advised that you use microdescriptors instead, but if you
  really need server descriptors then you can get them by setting
  'UseMicrodescriptors 0'.

  .. versionchanged:: 1.3.0
     Changed so we'd fetch our own descriptor if no 'relay' is provided.

  :param str relay: fingerprint or nickname of the relay to be queried
  :param object default: response if the query fails

  :returns: :class:`~stem.descriptor.server_descriptor.RelayDescriptor` for the given relay

  :raises:
    * :class:`stem.DescriptorUnavailable` if unable to provide a descriptor
      for the given relay
    * :class:`stem.ControllerError` if unable to query the descriptor
    * **ValueError** if **relay** doesn't conform with the pattern for being
      a fingerprint or nickname

    An exception is only raised if we weren't provided a default response.
  """

  # the outer try lets us swap any failure for a clearer 'descriptors are
  # unsupported' error when tor isn't providing them at all

  try:
    if relay is None:
      try:
        relay = self.get_info('fingerprint')
      except stem.ControllerError as exc:
        raise stem.ControllerError('Unable to determine our own fingerprint: %s' % exc)

    if stem.util.tor_tools.is_valid_fingerprint(relay):
      query = 'desc/id/%s' % relay
    elif stem.util.tor_tools.is_valid_nickname(relay):
      query = 'desc/name/%s' % relay
    else:
      raise ValueError("'%s' isn't a valid fingerprint or nickname" % relay)

    try:
      desc_content = self.get_info(query, get_bytes = True)
    except stem.InvalidArguments as exc:
      if str(exc).startswith('GETINFO request contained unrecognized keywords:'):
        raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay)
      else:
        raise exc

    if not desc_content:
      raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it')

    return stem.descriptor.server_descriptor.RelayDescriptor(desc_content)
  except Exception as exc:
    if not self._is_server_descriptors_available():
      raise ValueError(SERVER_DESCRIPTORS_UNSUPPORTED)

    raise exc
@with_default(yields = True)
def get_server_descriptors(self, default = UNDEFINED):
  """
  get_server_descriptors(default = UNDEFINED)

  Provides an iterator for all of the server descriptors that tor currently
  knows about.

  **As of Tor version 0.2.3.25 relays no longer get server descriptors by
  default.** It's advised that you use microdescriptors instead, but if you
  really need server descriptors then you can get them by setting
  'UseMicrodescriptors 0'.

  :param list default: items to provide if the query fails

  :returns: iterates over
    :class:`~stem.descriptor.server_descriptor.RelayDescriptor` for relays in
    the tor network

  :raises: :class:`stem.ControllerError` if unable to query tor and no
    default was provided
  """

  # TODO: We should iterate over the descriptors as they're read from the
  # socket rather than reading the whole thing into memory.
  #
  # https://trac.torproject.org/8248

  desc_content = self.get_info('desc/all-recent', get_bytes = True)

  if not desc_content:
    # distinguish 'tor doesn't provide these at all' from 'not fetched yet'

    if not self._is_server_descriptors_available():
      raise stem.ControllerError(SERVER_DESCRIPTORS_UNSUPPORTED)
    else:
      raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it')

  for desc in stem.descriptor.server_descriptor._parse_file(io.BytesIO(desc_content)):
    yield desc
def _is_server_descriptors_available(self):
  """
  Checks to see if tor server descriptors should be available or not.
  """

  # evaluation order matches the original short-circuit 'or': the version
  # check runs first, and the config lookup only happens if it's False

  if self.get_version() < stem.version.Requirement.MICRODESCRIPTOR_IS_DEFAULT:
    return True

  return self.get_conf('UseMicrodescriptors', None) == '0'
@with_default()
def get_network_status(self, relay = None, default = UNDEFINED):
  """
  get_network_status(relay = None, default = UNDEFINED)

  Provides the router status entry for the relay with the given fingerprint
  or nickname. If the relay identifier could be either a fingerprint *or*
  nickname then it's queried as a fingerprint.

  This provides
  :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3`
  instances if tor is using microdescriptors...

  ::

    controller.get_conf('UseMicrodescriptors', '0') == '1'

  ... and :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
  otherwise.

  If no **relay** is provided then this defaults to ourselves. Remember that
  this requires that we've retrieved our own descriptor from remote
  authorities so this both won't be available for newly started relays and
  may be up to around an hour out of date.

  .. versionchanged:: 1.3.0
     Changed so we'd fetch our own descriptor if no 'relay' is provided.

  :param str relay: fingerprint or nickname of the relay to be queried
  :param object default: response if the query fails

  :returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntry`
    for the given relay

  :raises:
    * :class:`stem.DescriptorUnavailable` if unable to provide a descriptor
      for the given relay
    * :class:`stem.ControllerError` if unable to query the descriptor
    * **ValueError** if **relay** doesn't conform with the pattern for being
      a fingerprint or nickname

    An exception is only raised if we weren't provided a default response.
  """

  if relay is None:
    try:
      relay = self.get_info('fingerprint')
    except stem.ControllerError as exc:
      raise stem.ControllerError('Unable to determine our own fingerprint: %s' % exc)

  # fingerprints win when the identifier could be either

  if stem.util.tor_tools.is_valid_fingerprint(relay):
    query = 'ns/id/%s' % relay
  elif stem.util.tor_tools.is_valid_nickname(relay):
    query = 'ns/name/%s' % relay
  else:
    raise ValueError("'%s' isn't a valid fingerprint or nickname" % relay)

  try:
    desc_content = self.get_info(query, get_bytes = True)
  except stem.InvalidArguments as exc:
    if str(exc).startswith('GETINFO request contained unrecognized keywords:'):
      raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay)
    else:
      raise exc

  if not desc_content:
    raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it')

  # entry class depends on whether tor is consuming microdescriptors

  if self.get_conf('UseMicrodescriptors', '0') == '1':
    return stem.descriptor.router_status_entry.RouterStatusEntryMicroV3(desc_content)
  else:
    return stem.descriptor.router_status_entry.RouterStatusEntryV3(desc_content)
@with_default(yields = True)
def get_network_statuses(self, default = UNDEFINED):
  """
  get_network_statuses(default = UNDEFINED)

  Provides an iterator for all of the router status entries that tor
  currently knows about.

  This provides
  :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3`
  instances if tor is using microdescriptors...

  ::

    controller.get_conf('UseMicrodescriptors', '0') == '1'

  ... and :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
  otherwise.

  :param list default: items to provide if the query fails

  :returns: iterates over
    :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` for
    relays in the tor network

  :raises: :class:`stem.ControllerError` if unable to query tor and no
    default was provided
  """

  # TODO: We should iterate over the descriptors as they're read from the
  # socket rather than reading the whole thing into memory.
  #
  # https://trac.torproject.org/8248

  # entry class depends on whether tor is consuming microdescriptors

  if self.get_conf('UseMicrodescriptors', '0') == '1':
    desc_class = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3
  else:
    desc_class = stem.descriptor.router_status_entry.RouterStatusEntryV3

  desc_content = self.get_info('ns/all', get_bytes = True)

  if not desc_content:
    raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it')

  desc_iterator = stem.descriptor.router_status_entry._parse_file(
    io.BytesIO(desc_content),
    True,
    entry_class = desc_class,
  )

  for desc in desc_iterator:
    yield desc
@with_default()
def get_hidden_service_descriptor(self, address, default = UNDEFINED, servers = None, await_result = True):
  """
  get_hidden_service_descriptor(address, default = UNDEFINED, servers = None, await_result = True)

  Provides the descriptor for a hidden service. The **address** is the
  '.onion' address of the hidden service (for instance 3g2upl4pq6kufc4m.onion
  for DuckDuckGo).

  If **await_result** is **True** then this blocks until we either receive
  the descriptor or the request fails. If **False** this returns right away.

  .. versionadded:: 1.4.0

  :param str address: address of the hidden service descriptor, the '.onion' suffix is optional
  :param object default: response if the query fails
  :param list servers: request the descriptor from these specific servers

  :returns: :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor`
    for the given service if **await_result** is **True**, or **None** otherwise

  :raises:
    * :class:`stem.DescriptorUnavailable` if **await_result** is **True** and
      unable to provide a descriptor for the given service
    * :class:`stem.ControllerError` if unable to query the descriptor
    * **ValueError** if **address** doesn't conform with the pattern of a
      hidden service address

  An exception is only raised if we weren't provided a default response.
  """

  if address.endswith('.onion'):
    address = address[:-6]

  if not stem.util.tor_tools.is_valid_hidden_service_address(address):
    raise ValueError("'%s.onion' isn't a valid hidden service address" % address)

  if self.get_version() < stem.version.Requirement.HSFETCH:
    raise stem.UnsatisfiableRequest(message = 'HSFETCH was added in tor version %s' % stem.version.Requirement.HSFETCH)

  hs_desc_queue, hs_desc_listener = queue.Queue(), None
  hs_desc_content_queue, hs_desc_content_listener = queue.Queue(), None

  if await_result:
    # subscribe before issuing HSFETCH so we can't miss the events

    def hs_desc_listener(event):
      hs_desc_queue.put(event)

    def hs_desc_content_listener(event):
      hs_desc_content_queue.put(event)

    self.add_event_listener(hs_desc_listener, EventType.HS_DESC)
    self.add_event_listener(hs_desc_content_listener, EventType.HS_DESC_CONTENT)

  try:
    request = 'HSFETCH %s' % address

    if servers:
      # BUG FIX: previously the SERVER arguments were appended without a
      # separating space, producing a malformed 'HSFETCH <addr>SERVER=...'
      # command when servers were provided.
      request += ' ' + ' '.join(['SERVER=%s' % s for s in servers])

    response = self.msg(request)
    stem.response.convert('SINGLELINE', response)

    if not response.is_ok():
      raise stem.ProtocolError('HSFETCH returned unexpected response code: %s' % response.code)

    if not await_result:
      return None  # not waiting, so nothing to provide back
    else:
      while True:
        event = hs_desc_content_queue.get()

        if event.address == address:
          if event.descriptor:
            return event.descriptor
          else:
            # no descriptor, looking through HS_DESC to figure out why

            while True:
              event = hs_desc_queue.get()

              if event.address == address and event.action == stem.HSDescAction.FAILED:
                if event.reason == stem.HSDescReason.NOT_FOUND:
                  raise stem.DescriptorUnavailable('No running hidden service at %s.onion' % address)
                else:
                  raise stem.DescriptorUnavailable('Unable to retrieve the descriptor for %s.onion (retrieved from %s): %s' % (address, event.directory_fingerprint, event.reason))
  finally:
    # drop our temporary listeners even if the fetch failed

    if hs_desc_listener:
      self.remove_event_listener(hs_desc_listener)

    if hs_desc_content_listener:
      self.remove_event_listener(hs_desc_content_listener)
def get_conf(self, param, default = UNDEFINED, multiple = False):
  """
  Queries the current value for a configuration option. Some configuration
  options (like the ExitPolicy) can have multiple values. This provides a
  **list** with all of the values if **multiple** is **True**. Otherwise this
  will be a **str** with the first value.

  If provided with a **default** then that is provided if the configuration
  option was unset or the query fails (invalid configuration option, error
  response, control port closed, initiated, etc).

  If the configuration value is unset and no **default** was given then this
  provides **None** if **multiple** was **False** and an empty list if it was
  **True**.

  :param str param: configuration option to be queried
  :param object default: response if the option is unset or the query fails
  :param bool multiple: if **True** then provides a list with all of the
    present values (this is an empty list if the config option is unset)

  :returns:
    Response depends upon how we were called as follows...

    * **str** with the configuration value if **multiple** was **False**,
      **None** if it was unset
    * **list** with the response strings if multiple was **True**
    * default if one was provided and the configuration option was either
      unset or our call failed

  :raises:
    * :class:`stem.ControllerError` if the call fails and we weren't
      provided a default response
    * :class:`stem.InvalidArguments` if the configuration option
      requested was invalid
  """

  # Config options are case insensitive and never contain whitespace, so a
  # whitespace-only param is treated the same as an empty one.

  key = param.lower().strip()

  if not key:
    return default if default != UNDEFINED else None

  lookup = self.get_conf_map(key, default, multiple)
  return _case_insensitive_lookup(lookup, key, default)
def get_conf_map(self, params, default = UNDEFINED, multiple = True):
  """
  Similar to :func:`~stem.control.Controller.get_conf` but queries multiple
  configuration options, providing back a mapping of those options to their
  values.

  There are three use cases for GETCONF:

  1. a single value is provided (e.g. **ControlPort**)
  2. multiple values are provided for the option (e.g. **ExitPolicy**)
  3. a set of options that weren't necessarily requested are returned (for
     instance querying **HiddenServiceOptions** gives **HiddenServiceDir**,
     **HiddenServicePort**, etc)

  The vast majority of the options fall into the first two categories, in
  which case calling :func:`~stem.control.Controller.get_conf` is sufficient.
  However, for batch queries or the special options that give a set of values
  this provides back the full response. As of tor version 0.2.1.25
  **HiddenServiceOptions** was the only option that falls into the third
  category.

  :param str,list params: configuration option(s) to be queried
  :param object default: value for the mappings if the configuration option
    is either undefined or the query fails
  :param bool multiple: if **True** then the values provided are lists with
    all of the present values

  :returns:
    **dict** of the 'config key => value' mappings. The value is a...

    * **str** if **multiple** is **False**, **None** if the configuration
      option is unset
    * **list** if **multiple** is **True**
    * the **default** if it was set and the value was either undefined or our
      lookup failed

  :raises:
    * :class:`stem.ControllerError` if the call fails and we weren't provided
      a default response
    * :class:`stem.InvalidArguments` if the configuration option requested
      was invalid
  """

  start_time = time.time()
  reply = {}

  if isinstance(params, (bytes, str_type)):
    params = [params]

  # remove strings which contain only whitespace
  params = [entry for entry in params if entry.strip()]

  if params == []:
    return {}

  # translate context sensitive options
  lookup_params = set([MAPPED_CONFIG_KEYS.get(entry, entry) for entry in params])

  # check for cached results
  from_cache = [param.lower() for param in lookup_params]
  cached_results = self._get_cache_map(from_cache, 'getconf')

  for key in cached_results:
    user_expected_key = _case_insensitive_lookup(lookup_params, key)
    reply[user_expected_key] = cached_results[key]
    lookup_params.remove(user_expected_key)

  # if everything was cached then short circuit making the query
  if not lookup_params:
    log.trace('GETCONF %s (cache fetch)' % ' '.join(reply.keys()))
    return self._get_conf_dict_to_response(reply, default, multiple)

  try:
    response = self.msg('GETCONF %s' % ' '.join(lookup_params))
    stem.response.convert('GETCONF', response)
    reply.update(response.entries)

    if self.is_caching_enabled():
      to_cache = dict((k.lower(), v) for k, v in response.entries.items())

      for key in UNCACHEABLE_GETCONF_PARAMS:
        if key in to_cache:
          del to_cache[key]

      self._set_cache(to_cache, 'getconf')

    # Maps the entries back to the parameters that the user requested so the
    # capitalization matches (ie, if they request "exitpolicy" then that
    # should be the key rather than "ExitPolicy"). When the same
    # configuration key is provided multiple times this determines the case
    # based on the first and ignores the rest.
    #
    # This retains the tor provided camel casing of MAPPED_CONFIG_KEYS
    # entries since the user didn't request those by their key, so we can't
    # be sure what they wanted.
    #
    # BUG FIX: iterate over a snapshot of the keys since the loop body adds
    # and deletes entries, which raises 'RuntimeError: dictionary changed
    # size during iteration' on Python 3.

    for key in list(reply):
      if not key.lower() in MAPPED_CONFIG_KEYS.values():
        user_expected_key = _case_insensitive_lookup(params, key, key)

        if key != user_expected_key:
          reply[user_expected_key] = reply[key]
          del reply[key]

    log.debug('GETCONF %s (runtime: %0.4f)' % (' '.join(lookup_params), time.time() - start_time))
    return self._get_conf_dict_to_response(reply, default, multiple)
  except stem.ControllerError as exc:
    log.debug('GETCONF %s (failed: %s)' % (' '.join(lookup_params), exc))

    if default != UNDEFINED:
      return dict((param, default) for param in params)
    else:
      raise exc
def _get_conf_dict_to_response(self, config_dict, default, multiple):
  """
  Translates a dictionary of 'config key => [value1, value2...]' into the
  return value of :func:`~stem.control.Controller.get_conf_map`, taking into
  account what the caller requested.
  """

  response = {}

  for key, values in list(config_dict.items()):
    if values == []:
      # config option was unset, fall back to the default when one was given
      if default != UNDEFINED:
        response[key] = default
      elif multiple:
        response[key] = []
      else:
        response[key] = None
    else:
      response[key] = values if multiple else values[0]

  return response
def set_conf(self, param, value):
  """
  Changes the value of a tor configuration option. Our value can be any of
  the following...

  * a string to set a single value
  * a list of strings to set a series of values (for instance the ExitPolicy)
  * None to either set the value to 0/NULL

  :param str param: configuration option to be set
  :param str,list value: value to set the parameter to

  :raises:
    * :class:`stem.ControllerError` if the call fails
    * :class:`stem.InvalidArguments` if configuration options
      requested was invalid
    * :class:`stem.InvalidRequest` if the configuration setting is
      impossible or if there's a syntax error in the configuration values
  """

  # single-option convenience wrapper around set_options()
  self.set_options({param: value}, reset = False)
def reset_conf(self, *params):
  """
  Reverts one or more parameters to their default values.

  :param str params: configuration option to be reset

  :raises:
    * :class:`stem.ControllerError` if the call fails
    * :class:`stem.InvalidArguments` if configuration options requested was invalid
    * :class:`stem.InvalidRequest` if the configuration setting is
      impossible or if there's a syntax error in the configuration values
  """

  # a RESETCONF with None values reverts each option to its default
  self.set_options(dict.fromkeys(params), True)
def set_options(self, params, reset = False):
  """
  Changes multiple tor configuration options via either a SETCONF or
  RESETCONF query. Both behave identically unless our value is None, in which
  case SETCONF sets the value to 0 or NULL, and RESETCONF returns it to its
  default value. This accepts str, list, or None values in a similar fashion
  to :func:`~stem.control.Controller.set_conf`. For example...

  ::

    my_controller.set_options({
      'Nickname': 'caerSidi',
      'ExitPolicy': ['accept *:80', 'accept *:443', 'reject *:*'],
      'ContactInfo': 'caerSidi-exit@someplace.com',
      'Log': None,
    })

  The params can optionally be a list of key/value tuples, though the only
  reason this type of argument would be useful is for hidden service
  configuration (those options are order dependent).

  :param dict,list params: mapping of configuration options to the values
    we're setting it to
  :param bool reset: issues a RESETCONF, returning **None** values to their
    defaults if **True**

  :raises:
    * :class:`stem.ControllerError` if the call fails
    * :class:`stem.InvalidArguments` if configuration options
      requested was invalid
    * :class:`stem.InvalidRequest` if the configuration setting is
      impossible or if there's a syntax error in the configuration values
  """

  start_time = time.time()

  # constructs the SETCONF or RESETCONF query
  query_comp = ['RESETCONF' if reset else 'SETCONF']

  if isinstance(params, dict):
    params = list(params.items())

  for param, value in params:
    if isinstance(value, str):
      # single value, quoted so values containing spaces survive
      query_comp.append('%s="%s"' % (param, value.strip()))
    elif value:
      # sequence of values, one quoted key="value" pair per entry
      query_comp.extend(['%s="%s"' % (param, val.strip()) for val in value])
    else:
      # None (or empty) value: the bare key, which SETCONF treats as 0/NULL
      # and RESETCONF as the option's default
      query_comp.append(param)

  query = ' '.join(query_comp)
  response = self.msg(query)
  stem.response.convert('SINGLELINE', response)

  if response.is_ok():
    log.debug('%s (runtime: %0.4f)' % (query, time.time() - start_time))

    if self.is_caching_enabled():
      # mirror the newly set values into our 'getconf' cache so later
      # lookups don't need to query the control port
      to_cache = {}

      for param, value in params:
        param = param.lower()

        if isinstance(value, (bytes, str_type)):
          value = [value]

        to_cache[param] = value

        if param == 'exitpolicy':
          # drop the separately cached exit policy since it's now stale
          self._set_cache({'exitpolicy': None})

      self._set_cache(to_cache, 'getconf')
  else:
    log.debug('%s (failed, code: %s, message: %s)' % (query, response.code, response.message))

    if response.code == '552':
      if response.message.startswith("Unrecognized option: Unknown option '"):
        # slice out the offending option name; 37 is the length of the
        # "Unrecognized option: Unknown option '" prefix checked above
        key = response.message[37:response.message.find("'", 37)]
        raise stem.InvalidArguments(response.code, response.message, [key])

      raise stem.InvalidRequest(response.code, response.message)
    elif response.code in ('513', '553'):
      raise stem.InvalidRequest(response.code, response.message)
    else:
      raise stem.ProtocolError('Returned unexpected status code: %s' % response.code)
@with_default()
def get_hidden_service_conf(self, default = UNDEFINED):
  """
  get_hidden_service_conf(default = UNDEFINED)

  This provides a mapping of hidden service directories to their
  attribute's key/value pairs. All hidden services are assured to have a
  'HiddenServicePort', but other entries may or may not exist.

  ::

    {
      "/var/lib/tor/hidden_service_empty/": {
        "HiddenServicePort": [
        ]
      },
      "/var/lib/tor/hidden_service_with_two_ports/": {
        "HiddenServiceAuthorizeClient": "stealth a, b",
        "HiddenServicePort": [
          (8020, "127.0.0.1", 8020), # the ports order is kept
          (8021, "127.0.0.1", 8021)
        ],
        "HiddenServiceVersion": "2"
      },
    }

  .. versionadded:: 1.3.0

  :param object default: response if the query fails

  :returns: **dict** with the hidden service configuration

  :raises: :class:`stem.ControllerError` if the call fails and we weren't
    provided a default response
  """

  start_time = time.time()

  try:
    response = self.msg('GETCONF HiddenServiceOptions')
    stem.response.convert('GETCONF', response)
    log.debug('GETCONF HiddenServiceOptions (runtime: %0.4f)' %
              (time.time() - start_time))
  except stem.ControllerError as exc:
    log.debug('GETCONF HiddenServiceOptions (failed: %s)' % exc)
    raise exc

  # OrderedDict so the HiddenServiceDir order tor reported is preserved
  service_dir_map = OrderedDict()
  directory = None  # HiddenServiceDir the following options belong to

  for status_code, divider, content in response.content():
    if content == 'HiddenServiceOptions':
      # response header line, not a config entry
      continue

    if '=' not in content:
      continue

    k, v = content.split('=', 1)

    if k == 'HiddenServiceDir':
      # starts a new hidden service stanza
      directory = v
      service_dir_map[directory] = {'HiddenServicePort': []}
    elif k == 'HiddenServicePort':
      # value is either 'port' alone, or 'port target' where target is
      # either a bare target_port or an 'address:target_port' pair
      port = target_port = v
      target_address = '127.0.0.1'

      if not v.isdigit():
        port, target = v.split()

        if target.isdigit():
          target_port = target
        else:
          target_address, target_port = target.split(':')

      if not stem.util.connection.is_valid_port(port):
        raise stem.ProtocolError('GETCONF provided an invalid HiddenServicePort port (%s): %s' % (port, content))
      elif not stem.util.connection.is_valid_ipv4_address(target_address):
        raise stem.ProtocolError('GETCONF provided an invalid HiddenServicePort target address (%s): %s' % (target_address, content))
      elif not stem.util.connection.is_valid_port(target_port):
        raise stem.ProtocolError('GETCONF provided an invalid HiddenServicePort target port (%s): %s' % (target_port, content))

      service_dir_map[directory]['HiddenServicePort'].append((int(port), target_address, int(target_port)))
    else:
      # any other attribute belongs to the current service's directory
      service_dir_map[directory][k] = v

  return service_dir_map
def set_hidden_service_conf(self, conf):
  """
  Update all the configured hidden services from a dictionary having
  the same format as
  :func:`~stem.control.Controller.get_hidden_service_conf`.

  For convenience the HiddenServicePort entries can be an integer, string, or
  tuple. If an **int** then we treat it as just a port. If a **str** we pass
  that directly as the HiddenServicePort. And finally, if a **tuple** then
  it's expected to be the **(port, target_address, target_port)** as provided
  by :func:`~stem.control.Controller.get_hidden_service_conf`.

  This is to say the following three are equivalent...

  ::

    "HiddenServicePort": [
      80,
      '80 127.0.0.1:80',
      (80, '127.0.0.1', 80),
    ]

  .. versionadded:: 1.3.0

  :param dict conf: configuration dictionary

  :raises:
    * :class:`stem.ControllerError` if the call fails
    * :class:`stem.InvalidArguments` if configuration options
      requested was invalid
    * :class:`stem.InvalidRequest` if the configuration setting is
      impossible or if there's a syntax error in the configuration values
  """

  # If we're not adding or updating any hidden services then call RESETCONF
  # so we drop existing values. Otherwise calling SETCONF is a no-op.

  if not conf:
    self.reset_conf('HiddenServiceDir')
    return

  # Flatten the config into ordered (key, value) tuples since hidden
  # service options are order dependent.

  options = []

  for directory, settings in conf.items():
    options.append(('HiddenServiceDir', directory))

    for key, value in list(settings.items()):
      if key != 'HiddenServicePort':
        options.append((key, str(value)))
        continue

      for port_entry in value:
        if isinstance(port_entry, int):
          # bare port, target defaults to the same port on localhost
          port_entry = '%s 127.0.0.1:%s' % (port_entry, port_entry)
        elif isinstance(port_entry, tuple):
          port_entry = '%s %s:%s' % port_entry

        # strings are passed along exactly as the user gave them
        options.append(('HiddenServicePort', port_entry))

  self.set_options(options)
def create_hidden_service(self, path, port, target_address = None, target_port = None, auth_type = None, client_names = None):
  """
  Create a new hidden service. If the directory is already present, a
  new port is added. This provides a **namedtuple** of the following...

  * path (str) - hidden service directory
  * hostname (str) - Content of the hostname file, if no **client_names**
    are provided this is the onion address of the service. This is only
    retrieved if we can read the hidden service directory.
  * hostname_for_client (dict) - mapping of client names to their onion
    address, this is only set if the **client_names** was provided and we
    can read the hidden service directory
  * config (dict) - tor's new hidden service configuration

  Our *.onion address is fetched by reading the hidden service directory.
  However, this directory is only readable by the tor user, so if unavailable
  the **hostname** will be **None**.

  **As of Tor 0.2.7.1 there's two ways for creating hidden services. This is
  no longer the recommended method.** Rather, try using
  :func:`~stem.control.Controller.create_ephemeral_hidden_service` instead.

  .. versionadded:: 1.3.0

  .. versionchanged:: 1.4.0
     Added the auth_type and client_names arguments.

  :param str path: path for the hidden service's data directory
  :param int port: hidden service port
  :param str target_address: address of the service, by default 127.0.0.1
  :param int target_port: port of the service, by default this is the same as
    **port**
  :param str auth_type: authentication type: basic, stealth or None to disable auth
  :param list client_names: client names (1-16 characters "A-Za-z0-9+-_")

  :returns: **CreateHiddenServiceOutput** if we create or update a hidden service, **None** otherwise

  :raises: :class:`stem.ControllerError` if the call fails
  """

  if not stem.util.connection.is_valid_port(port):
    raise ValueError("%s isn't a valid port number" % port)
  elif target_address and not stem.util.connection.is_valid_ipv4_address(target_address):
    raise ValueError("%s isn't a valid IPv4 address" % target_address)
  elif target_port is not None and not stem.util.connection.is_valid_port(target_port):
    raise ValueError("%s isn't a valid port number" % target_port)
  elif auth_type not in (None, 'basic', 'stealth'):
    raise ValueError("%s isn't a recognized type of authentication" % auth_type)

  port = int(port)
  target_address = target_address if target_address else '127.0.0.1'
  target_port = port if target_port is None else int(target_port)

  conf = self.get_hidden_service_conf()

  # no-op if this directory already has this exact port mapping
  if path in conf and (port, target_address, target_port) in conf[path]['HiddenServicePort']:
    return None

  conf.setdefault(path, OrderedDict()).setdefault('HiddenServicePort', []).append((port, target_address, target_port))

  if auth_type and client_names:
    hsac = "%s %s" % (auth_type, ','.join(client_names))
    conf[path]['HiddenServiceAuthorizeClient'] = hsac

  self.set_hidden_service_conf(conf)

  hostname, hostname_for_client = None, {}

  if self.is_localhost():
    hostname_path = os.path.join(path, 'hostname')

    if not os.path.isabs(hostname_path):
      # the path is relative to tor's cwd rather than ours, so expand it
      cwd = stem.util.system.cwd(self.get_pid(None))

      if cwd:
        hostname_path = stem.util.system.expand_path(hostname_path, cwd)

    if os.path.isabs(hostname_path):
      # wait up to three seconds for tor to write the hostname file
      start_time = time.time()

      while not os.path.exists(hostname_path):
        wait_time = time.time() - start_time

        if wait_time >= 3:
          break
        else:
          time.sleep(0.05)

      try:
        with open(hostname_path) as hostname_file:
          hostname = hostname_file.read().strip()

          if client_names and '\n' in hostname:
            # When there's multiple clients this looks like...
            #
            # ndisjxzkgcdhrwqf.onion sjUwjTSPznqWLdOPuwRUzg # client: c1
            # ndisjxzkgcdhrwqf.onion sUu92axuL5bKnA76s2KRfw # client: c2

            for line in hostname.splitlines():
              if ' # client: ' in line:
                address = line.split()[0]
                client = line.split(' # client: ', 1)[1]

                # 16 character onion address plus the '.onion' suffix
                if len(address) == 22 and address.endswith('.onion'):
                  hostname_for_client[client] = address
      except:
        # the hidden service directory is typically only readable by the
        # tor user, so a failure here is expected when we lack access
        pass

  return CreateHiddenServiceOutput(
    path = path,
    hostname = hostname,
    hostname_for_client = hostname_for_client,
    config = conf,
  )
def remove_hidden_service(self, path, port = None):
  """
  Discontinues a given hidden service.

  .. versionadded:: 1.3.0

  :param str path: path for the hidden service's data directory
  :param int port: hidden service port

  :returns: **True** if the hidden service is discontinued, **False** if it
    wasn't running in the first place

  :raises: :class:`stem.ControllerError` if the call fails
  """

  if port and not stem.util.connection.is_valid_port(port):
    raise ValueError("%s isn't a valid port number" % port)

  port = int(port) if port else None
  conf = self.get_hidden_service_conf()

  if path not in conf:
    return False

  if port is None:
    # no port specified, drop the whole service
    del conf[path]
  else:
    current_ports = conf[path]['HiddenServicePort']
    remaining = [entry for entry in current_ports if entry[0] != port]

    if len(remaining) == len(current_ports):
      return False  # nothing matched that port

    if remaining:
      conf[path]['HiddenServicePort'] = remaining
    else:
      del conf[path]  # no ports left

  self.set_hidden_service_conf(conf)
  return True
@with_default()
def list_ephemeral_hidden_services(self, default = UNDEFINED, our_services = True, detached = False):
  """
  list_ephemeral_hidden_services(default = UNDEFINED, our_services = True, detached = False)

  Lists hidden service addresses created by
  :func:`~stem.control.Controller.create_ephemeral_hidden_service`.

  .. versionadded:: 1.4.0

  :param object default: response if the query fails
  :param bool our_services: include services created with this controller
    that weren't flagged as 'detached'
  :param bool detached: include services whose continuation isn't tied to a
    controller

  :returns: **list** of hidden service addresses without their '.onion'
    suffix

  :raises: :class:`stem.ControllerError` if the call fails and we weren't
    provided a default response
  """

  if self.get_version() < stem.version.Requirement.ADD_ONION:
    raise stem.UnsatisfiableRequest(message = 'Ephemeral hidden services were added in tor version %s' % stem.version.Requirement.ADD_ONION)

  result = []

  if our_services:
    try:
      # newline separated addresses of services tied to this controller
      result += self.get_info('onions/current').split('\n')
    except stem.ProtocolError as exc:
      # having no services is reported as an error rather than an empty
      # reply, so swallow only that specific failure
      if 'No onion services of the specified type.' not in str(exc):
        raise exc

  if detached:
    try:
      result += self.get_info('onions/detached').split('\n')
    except stem.ProtocolError as exc:
      if 'No onion services of the specified type.' not in str(exc):
        raise exc

  return result
def create_ephemeral_hidden_service(self, ports, key_type = 'NEW', key_content = 'BEST', discard_key = False, detached = False, await_publication = False):
  """
  Creates a new hidden service. Unlike
  :func:`~stem.control.Controller.create_hidden_service` this style of
  hidden service doesn't touch disk, carrying with it a lot of advantages.
  This is the suggested method for making hidden services.

  Our **ports** argument can be a single port...

  ::

    create_ephemeral_hidden_service(80)

  ... list of ports the service is available on...

  ::

    create_ephemeral_hidden_service([80, 443])

  ... or a mapping of hidden service ports to their targets...

  ::

    create_ephemeral_hidden_service({80: 80, 443: '173.194.33.133:443'})

  .. versionadded:: 1.4.0

  :param int,list,dict ports: hidden service port(s) or mapping of hidden
    service ports to their targets
  :param str key_type: type of key being provided, generates a new key if
    'NEW' (options are: **NEW** and **RSA1024**)
  :param str key_content: key for the service to use or type of key to be
    generated (options when **key_type** is **NEW** are **BEST** and
    **RSA1024**)
  :param bool discard_key: avoid providing the key back in our response
  :param bool detached: continue this hidden service even after this control
    connection is closed if **True**
  :param bool await_publication: blocks until our descriptor is successfully
    published if **True**

  :returns: :class:`~stem.response.add_onion.AddOnionResponse` with the response

  :raises: :class:`stem.ControllerError` if the call fails
  """

  if self.get_version() < stem.version.Requirement.ADD_ONION:
    raise stem.UnsatisfiableRequest(message = 'Ephemeral hidden services were added in tor version %s' % stem.version.Requirement.ADD_ONION)

  hs_desc_queue, hs_desc_listener = queue.Queue(), None

  if await_publication:
    # subscribe to HS_DESC events before issuing ADD_ONION so we can't
    # miss the upload notifications

    def hs_desc_listener(event):
      hs_desc_queue.put(event)

    self.add_event_listener(hs_desc_listener, EventType.HS_DESC)

  request = 'ADD_ONION %s:%s' % (key_type, key_content)

  flags = []

  if discard_key:
    flags.append('DiscardPK')

  if detached:
    flags.append('Detach')

  if flags:
    request += ' Flags=%s' % ','.join(flags)

  if isinstance(ports, int):
    request += ' Port=%s' % ports
  elif isinstance(ports, list):
    for port in ports:
      request += ' Port=%s' % port
  elif isinstance(ports, dict):
    for port, target in ports.items():
      request += ' Port=%s,%s' % (port, target)
  else:
    raise ValueError("The 'ports' argument of create_ephemeral_hidden_service() needs to be an int, list, or dict")

  response = self.msg(request)
  stem.response.convert('ADD_ONION', response)

  if await_publication:
    # We should receive five UPLOAD events, followed by up to another five
    # UPLOADED to indicate they've finished. Presently tor seems to have an
    # issue where the address is provided for UPLOAD but not UPLOADED so need
    # to just guess that if it's for the same hidden service authority then
    # it's what we're looking for.

    directories_uploaded_to, failures = [], []

    try:
      while True:
        event = hs_desc_queue.get()

        if event.action == stem.HSDescAction.UPLOAD and event.address == response.service_id:
          directories_uploaded_to.append(event.directory_fingerprint)
        elif event.action == stem.HSDescAction.UPLOADED and event.directory_fingerprint in directories_uploaded_to:
          break  # successfully uploaded to a HS authority... maybe
        elif event.action == stem.HSDescAction.FAILED and event.directory_fingerprint in directories_uploaded_to:
          failures.append('%s (%s)' % (event.directory_fingerprint, event.reason))

          # only give up once every upload we saw has failed
          if len(directories_uploaded_to) == len(failures):
            raise stem.OperationFailed(message = 'Failed to upload our hidden service descriptor to %s' % ', '.join(failures))
    finally:
      self.remove_event_listener(hs_desc_listener)

  return response
def remove_ephemeral_hidden_service(self, service_id):
  """
  Discontinues a given hidden service that was created with
  :func:`~stem.control.Controller.create_ephemeral_hidden_service`.

  .. versionadded:: 1.4.0

  :param str service_id: hidden service address without the '.onion' suffix

  :returns: **True** if the hidden service is discontinued, **False** if it
    wasn't running in the first place

  :raises: :class:`stem.ControllerError` if the call fails
  """

  if self.get_version() < stem.version.Requirement.ADD_ONION:
    raise stem.UnsatisfiableRequest(message = 'Ephemeral hidden services were added in tor version %s' % stem.version.Requirement.ADD_ONION)

  response = self.msg('DEL_ONION %s' % service_id)
  stem.response.convert('SINGLELINE', response)

  if response.is_ok():
    return True

  if response.code == '552':
    return False  # no hidden service to discontinue

  raise stem.ProtocolError('DEL_ONION returned unexpected response code: %s' % response.code)
def add_event_listener(self, listener, *events):
  """
  Directs further tor controller events to a given function. The function is
  expected to take a single argument, which is a
  :class:`~stem.response.events.Event` subclass. For instance the following
  would print the bytes sent and received by tor over five seconds...

  ::

    import time
    from stem.control import Controller, EventType

    def print_bw(event):
      print('sent: %i, received: %i' % (event.written, event.read))

    with Controller.from_port(port = 9051) as controller:
      controller.authenticate()
      controller.add_event_listener(print_bw, EventType.BW)
      time.sleep(5)

  If a new control connection is initialized then this listener will be
  reattached.

  :param functor listener: function to be called when an event is received
  :param stem.control.EventType events: event types to be listened for

  :raises: :class:`stem.ProtocolError` if unable to set the events
  """

  # first checking that tor supports these event types

  with self._event_listeners_lock:
    if self.is_authenticated():
      for event_type in events:
        # map the event name to its class so we can check when it was added
        event_type = stem.response.events.EVENT_TYPE_TO_CLASS.get(event_type)

        if event_type and (self.get_version() < event_type._VERSION_ADDED):
          raise stem.InvalidRequest(552, '%s event requires Tor version %s or later' % (event_type, event_type._VERSION_ADDED))

    for event_type in events:
      self._event_listeners.setdefault(event_type, []).append(listener)

    failed_events = self._attach_listeners()[1]

    # restrict the reported failures to just the events we requested here
    failed_events = set(failed_events).intersection(set(events))

    if failed_events:
      raise stem.ProtocolError('SETEVENTS rejected %s' % ', '.join(failed_events))
def remove_event_listener(self, listener):
    """
    Stops notifying *listener* of further tor events.

    :param stem.control.EventListener listener: listener to be removed
    :raises: :class:`stem.ProtocolError` if unable to set the events
    """
    with self._event_listeners_lock:
        subscription_changed = False

        for event_type, event_listeners in list(self._event_listeners.items()):
            if listener in event_listeners:
                event_listeners.remove(listener)

                # drop the subscription entirely once its last listener is gone
                if not event_listeners:
                    subscription_changed = True
                    del self._event_listeners[event_type]

        if subscription_changed:
            response = self.msg('SETEVENTS %s' % ' '.join(self._event_listeners.keys()))

            if not response.is_ok():
                raise stem.ProtocolError('SETEVENTS received unexpected response\n%s' % response)
def _get_cache(self, param, namespace = None):
"""
Queries our request cache for the given key.
:param str param: key to be queried
:param str namespace: namespace in which to check for the key
:returns: cached value corresponding to key or **None** if the key wasn't found
"""
return self._get_cache_map([param], namespace).get(param, None)
def _get_cache_map(self, params, namespace = None):
"""
Queries our request cache for multiple entries.
:param list params: keys to be queried
:param str namespace: namespace in which to check for the keys
:returns: **dict** of 'param => cached value' pairs of keys present in cache
"""
with self._cache_lock:
cached_values = {}
if self.is_caching_enabled():
for param in params:
if namespace:
cache_key = '%s.%s' % (namespace, param)
else:
cache_key = param
if cache_key in self._request_cache:
cached_values[param] = self._request_cache[cache_key]
return cached_values
def _set_cache(self, params, namespace = None):
"""
Sets the given request cache entries. If the new cache value is **None**
then it is removed from our cache.
:param dict params: **dict** of 'cache_key => value' pairs to be cached
:param str namespace: namespace for the keys
"""
with self._cache_lock:
if not self.is_caching_enabled():
return
for key, value in list(params.items()):
if namespace:
cache_key = '%s.%s' % (namespace, key)
else:
cache_key = key
if value is None:
if cache_key in self._request_cache:
del self._request_cache[cache_key]
else:
self._request_cache[cache_key] = value
def is_caching_enabled(self):
    """
    Indicates if caching of tor responses is presently enabled.

    :returns: **bool**, **True** if caching is enabled and **False** otherwise
    """
    return self._is_caching_enabled
def set_caching(self, enabled):
    """
    Enables or disables caching of information retrieved from tor. Disabling
    caching also drops anything presently cached.

    :param bool enabled: **True** to enable caching, **False** to disable it
    """
    self._is_caching_enabled = enabled

    if not enabled:
        self.clear_cache()
def clear_cache(self):
    """
    Drops all cached results, along with our NEWNYM timestamp and geoip
    failure count.
    """
    with self._cache_lock:
        self._request_cache = {}
        self._last_newnym = 0.0
        self._geoip_failure_count = 0
def load_conf(self, configtext):
    """
    Sends the given configuration text to tor, having it parsed as if it had
    been read from the torrc.

    :param str configtext: the configuration text
    :raises: :class:`stem.ControllerError` if the call fails
    """
    response = self.msg('LOADCONF\n%s' % configtext)
    stem.response.convert('SINGLELINE', response)

    if response.code in ('552', '553'):
        if response.code == '552' and response.message.startswith('Invalid config file: Failed to parse/validate config: Unknown option'):
            # pluck the offending option name back out of tor's error message
            raise stem.InvalidArguments(response.code, response.message, [response.message[70:response.message.find('.', 70) - 1]])

        raise stem.InvalidRequest(response.code, response.message)
    elif not response.is_ok():
        raise stem.ProtocolError('+LOADCONF Received unexpected response\n%s' % str(response))
def save_conf(self):
    """
    Has tor write its present configuration to its torrc.

    :raises:
      * :class:`stem.ControllerError` if the call fails
      * :class:`stem.OperationFailed` if the client is unable to save
        the configuration file
    """
    response = self.msg('SAVECONF')
    stem.response.convert('SINGLELINE', response)

    if response.is_ok():
        return True
    elif response.code == '551':
        raise stem.OperationFailed(response.code, response.message)
    else:
        raise stem.ProtocolError('SAVECONF returned unexpected response code')
def is_feature_enabled(self, feature):
    """
    Checks if a control connection feature is presently enabled. Features can
    be enabled via :func:`~stem.control.Controller.enable_feature`.

    :param str feature: feature to be checked
    :returns: **True** if the feature is enabled, **False** otherwise
    """
    feature = feature.upper()

    if feature in self._enabled_features:
        return True

    # Some features are on by default for recent tor versions. When that is
    # the case remember it so future lookups skip the version check.

    defaulted_version = None

    if feature == 'EXTENDED_EVENTS':
        defaulted_version = stem.version.Requirement.FEATURE_EXTENDED_EVENTS
    elif feature == 'VERBOSE_NAMES':
        defaulted_version = stem.version.Requirement.FEATURE_VERBOSE_NAMES

    if defaulted_version:
        our_version = self.get_version(None)

        if our_version and our_version >= defaulted_version:
            self._enabled_features.append(feature)

    return feature in self._enabled_features
def enable_feature(self, features):
    """
    Enables features that tor disables by default for backward compatibility.
    Once enabled, a feature cannot be disabled without opening a new control
    connection. Feature names are case-insensitive.

    The presently accepted features are...

    * EXTENDED_EVENTS - Requests the extended event syntax
    * VERBOSE_NAMES - Replaces ServerID with LongName in events and GETINFO results

    :param str,list features: a single feature or a list of features to be enabled

    :raises:
      * :class:`stem.ControllerError` if the call fails
      * :class:`stem.InvalidArguments` if features passed were invalid
    """
    if isinstance(features, (bytes, str_type)):
        features = [features]

    response = self.msg('USEFEATURE %s' % ' '.join(features))
    stem.response.convert('SINGLELINE', response)

    if not response.is_ok():
        if response.code == '552':
            invalid_feature = []

            # pluck the rejected feature name back out of tor's error message
            if response.message.startswith('Unrecognized feature "'):
                invalid_feature = [response.message[22:response.message.find('"', 22)]]

            raise stem.InvalidArguments(response.code, response.message, invalid_feature)

        raise stem.ProtocolError('USEFEATURE provided an invalid response code: %s' % response.code)

    self._enabled_features += [entry.upper() for entry in features]
@with_default()
def get_circuit(self, circuit_id, default = UNDEFINED):
    """
    get_circuit(circuit_id, default = UNDEFINED)

    Provides the circuit tor presently has with the given id.

    :param int circuit_id: circuit to be fetched
    :param object default: response if the query fails

    :returns: :class:`stem.response.events.CircuitEvent` for the given circuit

    :raises:
      * :class:`stem.ControllerError` if the call fails
      * **ValueError** if the circuit doesn't exist

      An exception is only raised if we weren't provided a default response.
    """
    for circ in self.get_circuits():
        if circ.id == circuit_id:
            return circ

    raise ValueError("Tor currently does not have a circuit with the id of '%s'" % circuit_id)
@with_default()
def get_circuits(self, default = UNDEFINED):
    """
    get_circuits(default = UNDEFINED)

    Provides tor's presently available circuits.

    :param object default: response if the query fails
    :returns: **list** of :class:`stem.response.events.CircuitEvent` for our circuits
    :raises: :class:`stem.ControllerError` if the call fails and no default was provided
    """
    # Synthesize CIRC events from 'GETINFO circuit-status' so callers get the
    # same objects our event listeners would receive.

    circuits = []

    for circ in self.get_info('circuit-status').splitlines():
        circ_message = stem.socket.recv_message(StringIO('650 CIRC ' + circ + '\r\n'))
        stem.response.convert('EVENT', circ_message, arrived_at = 0)
        circuits.append(circ_message)

    return circuits
def new_circuit(self, path = None, purpose = 'general', await_build = False):
    """
    Requests a new circuit, letting tor select the path if one isn't given.

    :param list,str path: one or more relays to make a circuit through
    :param str purpose: 'general' or 'controller'
    :param bool await_build: blocks until the circuit is built if **True**

    :returns: str of the circuit id of the newly created circuit
    :raises: :class:`stem.ControllerError` if the call fails
    """
    # a circuit id of '0' tells EXTENDCIRCUIT to create rather than extend
    return self.extend_circuit('0', path, purpose, await_build)
def extend_circuit(self, circuit_id = '0', path = None, purpose = 'general', await_build = False):
    """
    Either requests the creation of a new circuit or extends an existing one.

    When called with a circuit id of zero (the default) a new circuit is
    created, and when non-zero the circuit with that id is extended. If the
    path isn't provided, one is automatically selected.

    A python interpreter session used to create circuits could look like this...

    ::

      >>> controller.extend_circuit('0', ['718BCEA286B531757ACAFF93AE04910EA73DE617', '30BAB8EE7606CBD12F3CC269AE976E0153E7A58D', '2765D8A8C4BBA3F89585A9FFE0E8575615880BEB'])
      19
      >>> controller.extend_circuit('0')
      20

    :param str circuit_id: id of a circuit to be extended
    :param list,str path: one or more relays to make a circuit through, this is
      required if the circuit id is non-zero
    :param str purpose: 'general' or 'controller'
    :param bool await_build: blocks until the circuit is built if **True**

    :returns: str of the circuit id of the created or extended circuit

    :raises:
      * :class:`stem.InvalidRequest` if one of the parameters were invalid
      * :class:`stem.CircuitExtensionFailed` if we were waiting for the circuit
        to build but it failed
      * :class:`stem.ControllerError` if the call fails
    """

    # Attaches a temporary listener for CIRC events if we'll be waiting for it
    # to build. This is icky, but we can't reliably do this via polling since
    # we then can't get the failure if it can't be created.

    circ_queue, circ_listener = queue.Queue(), None

    if await_build:
        def circ_listener(event):
            circ_queue.put(event)

        self.add_event_listener(circ_listener, EventType.CIRC)

    try:
        # we might accidentally get integer circuit ids
        circuit_id = str(circuit_id)

        if path is None and circuit_id == '0':
            path_opt_version = stem.version.Requirement.EXTENDCIRCUIT_PATH_OPTIONAL

            if self.get_version() < path_opt_version:
                raise stem.InvalidRequest(512, 'EXTENDCIRCUIT requires the path prior to version %s' % path_opt_version)

        args = [circuit_id]

        if isinstance(path, (bytes, str_type)):
            path = [path]

        if path:
            args.append(','.join(path))

        if purpose:
            args.append('purpose=%s' % purpose)

        response = self.msg('EXTENDCIRCUIT %s' % ' '.join(args))
        stem.response.convert('SINGLELINE', response)

        if response.code in ('512', '552'):
            raise stem.InvalidRequest(response.code, response.message)
        elif not response.is_ok():
            raise stem.ProtocolError('EXTENDCIRCUIT returned unexpected response code: %s' % response.code)

        if not response.message.startswith('EXTENDED '):
            # BUG FIX: previously the response was passed as a second positional
            # argument ('...%s', response) rather than %-interpolated, so the
            # error message was never actually formatted.
            raise stem.ProtocolError('EXTENDCIRCUIT response invalid:\n%s' % response)

        new_circuit = response.message.split(' ', 1)[1]

        if await_build:
            while True:
                circ = circ_queue.get()

                if circ.id == new_circuit:
                    if circ.status == CircStatus.BUILT:
                        break
                    elif circ.status == CircStatus.FAILED:
                        raise stem.CircuitExtensionFailed('Circuit failed to be created: %s' % circ.reason, circ)
                    elif circ.status == CircStatus.CLOSED:
                        raise stem.CircuitExtensionFailed('Circuit was closed prior to build', circ)

        return new_circuit
    finally:
        # drop our temporary CIRC listener regardless of how we exit
        if circ_listener:
            self.remove_event_listener(circ_listener)
def repurpose_circuit(self, circuit_id, purpose):
    """
    Changes a circuit's purpose. The recognized purposes are...

    * general
    * controller

    :param str circuit_id: id of the circuit whose purpose is to be changed
    :param str purpose: purpose (either 'general' or 'controller')

    :raises: :class:`stem.InvalidArguments` if the circuit doesn't exist or if the purpose was invalid
    """
    response = self.msg('SETCIRCUITPURPOSE %s purpose=%s' % (circuit_id, purpose))
    stem.response.convert('SINGLELINE', response)

    if not response.is_ok():
        if response.code == '552':
            raise stem.InvalidRequest(response.code, response.message)
        else:
            raise stem.ProtocolError('SETCIRCUITPURPOSE returned unexpected response code: %s' % response.code)
def close_circuit(self, circuit_id, flag = ''):
    """
    Closes the specified circuit.

    :param str circuit_id: id of the circuit to be closed
    :param str flag: optional value to modify closing, the only flag available
      is 'IfUnused' which will not close the circuit unless it is unused

    :raises: :class:`stem.InvalidArguments` if the circuit is unknown
    :raises: :class:`stem.InvalidRequest` if not enough information is provided
    """
    response = self.msg('CLOSECIRCUIT %s %s' % (circuit_id, flag))
    stem.response.convert('SINGLELINE', response)

    if not response.is_ok():
        if response.code in ('512', '552'):
            if response.message.startswith('Unknown circuit '):
                raise stem.InvalidArguments(response.code, response.message, [circuit_id])

            raise stem.InvalidRequest(response.code, response.message)
        else:
            raise stem.ProtocolError('CLOSECIRCUIT returned unexpected response code: %s' % response.code)
@with_default()
def get_streams(self, default = UNDEFINED):
    """
    get_streams(default = UNDEFINED)

    Provides the list of streams tor is presently handling.

    :param object default: response if the query fails

    :returns: list of :class:`stem.response.events.StreamEvent` objects

    :raises: :class:`stem.ControllerError` if the call fails and no default was
      provided
    """
    # Synthesize STREAM events from 'GETINFO stream-status' so callers get the
    # same objects our event listeners would receive.

    streams = []

    for stream in self.get_info('stream-status').splitlines():
        message = stem.socket.recv_message(StringIO('650 STREAM ' + stream + '\r\n'))
        stem.response.convert('EVENT', message, arrived_at = 0)
        streams.append(message)

    return streams
def attach_stream(self, stream_id, circuit_id, exiting_hop = None):
    """
    Attaches a stream to a circuit.

    Note: tor attaches streams to circuits automatically unless the
    __LeaveStreamsUnattached configuration variable is set to '1'.

    :param str stream_id: id of the stream that must be attached
    :param str circuit_id: id of the circuit to which it must be attached
    :param int exiting_hop: hop in the circuit where traffic should exit

    :raises:
      * :class:`stem.InvalidRequest` if the stream or circuit id were unrecognized
      * :class:`stem.UnsatisfiableRequest` if the stream isn't in a state where it can be attached
      * :class:`stem.OperationFailed` if the stream couldn't be attached for any other reason
    """
    query = 'ATTACHSTREAM %s %s' % (stream_id, circuit_id)

    if exiting_hop:
        query += ' HOP=%s' % exiting_hop

    response = self.msg(query)
    stem.response.convert('SINGLELINE', response)

    if not response.is_ok():
        if response.code == '552':
            raise stem.InvalidRequest(response.code, response.message)
        elif response.code == '551':
            raise stem.OperationFailed(response.code, response.message)
        elif response.code == '555':
            raise stem.UnsatisfiableRequest(response.code, response.message)
        else:
            raise stem.ProtocolError('ATTACHSTREAM returned unexpected response code: %s' % response.code)
def close_stream(self, stream_id, reason = stem.RelayEndReason.MISC, flag = ''):
    """
    Closes the specified stream.

    :param str stream_id: id of the stream to be closed
    :param stem.RelayEndReason reason: reason the stream is closing
    :param str flag: not currently used

    :raises:
      * :class:`stem.InvalidArguments` if the stream or reason are not recognized
      * :class:`stem.InvalidRequest` if the stream and/or reason are missing
    """
    # there's a single value offset between RelayEndReason.index_of() and the
    # value that tor expects since tor's value starts with the index of one

    response = self.msg('CLOSESTREAM %s %s %s' % (stream_id, stem.RelayEndReason.index_of(reason) + 1, flag))
    stem.response.convert('SINGLELINE', response)

    if not response.is_ok():
        if response.code in ('512', '552'):
            if response.message.startswith('Unknown stream '):
                raise stem.InvalidArguments(response.code, response.message, [stream_id])
            elif response.message.startswith('Unrecognized reason '):
                raise stem.InvalidArguments(response.code, response.message, [reason])

            raise stem.InvalidRequest(response.code, response.message)
        else:
            raise stem.ProtocolError('CLOSESTREAM returned unexpected response code: %s' % response.code)
def signal(self, signal):
    """
    Sends a signal to the Tor client.

    :param stem.Signal signal: type of signal to be sent
    :raises: :class:`stem.InvalidArguments` if the signal provided wasn't recognized
    """
    response = self.msg('SIGNAL %s' % signal)
    stem.response.convert('SINGLELINE', response)

    if not response.is_ok():
        if response.code == '552':
            raise stem.InvalidArguments(response.code, response.message, [signal])

        raise stem.ProtocolError('SIGNAL response contained unrecognized status code: %s' % response.code)

    # remember when tor last honored a NEWNYM so get_newnym_wait() can report
    # the rate limiting window
    if signal == stem.Signal.NEWNYM:
        self._last_newnym = time.time()
def is_newnym_available(self):
    """
    Indicates if tor would presently accept a NEWNYM signal. This can only
    account for signals sent via this controller.

    .. versionadded:: 1.2.0

    :returns: **True** if tor would currently accept a NEWNYM signal, **False**
      otherwise
    """
    return self.is_alive() and self.get_newnym_wait() == 0.0
def get_newnym_wait(self):
    """
    Provides the number of seconds until a NEWNYM signal would be respected.
    This can only account for signals sent via this controller.

    .. versionadded:: 1.2.0

    :returns: **float** for the number of seconds until tor would respect
      another NEWNYM signal
    """
    # tor rate limits NEWNYM to one per ten seconds
    return max(0.0, self._last_newnym + 10 - time.time())
@with_default()
def get_effective_rate(self, default = UNDEFINED, burst = False):
    """
    get_effective_rate(default = UNDEFINED, burst = False)

    Provides the maximum rate this relay is configured to relay in bytes per
    second. This is based on multiple torrc parameters if they're set...

    * Effective Rate = min(BandwidthRate, RelayBandwidthRate, MaxAdvertisedBandwidth)
    * Effective Burst = min(BandwidthBurst, RelayBandwidthBurst)

    .. versionadded:: 1.3.0

    :param object default: response if the query fails
    :param bool burst: provides the burst bandwidth, otherwise this provides
      the standard rate

    :returns: **int** with the effective bandwidth rate in bytes per second

    :raises: :class:`stem.ControllerError` if the call fails and no default was
      provided
    """
    if burst:
        attributes = ('BandwidthBurst', 'RelayBandwidthBurst')
    else:
        attributes = ('BandwidthRate', 'RelayBandwidthRate', 'MaxAdvertisedBandwidth')

    value = None

    for attr in attributes:
        attr_value = int(self.get_conf(attr))

        if attr_value == 0 and attr.startswith('Relay'):
            continue  # RelayBandwidthRate and RelayBandwidthBurst default to zero

        # BUG FIX: previously this tested 'if value' (truthiness), so a
        # legitimate configured rate of zero was discarded and replaced by
        # later attributes. Compare against None instead.
        value = attr_value if value is None else min(value, attr_value)

    return value
def is_geoip_unavailable(self):
    """
    Provides **True** if we've concluded that our geoip database is
    unavailable, **False** otherwise. This is determined by having our
    'GETINFO ip-to-country/\*' lookups fail, so this will default to **False**
    if we aren't making those queries.

    Geoip failures will be untracked if caching is disabled.

    :returns: **bool** to indicate if we've concluded our geoip database to be
      unavailable or not
    """
    return self._geoip_failure_count >= GEOIP_FAILURE_THRESHOLD
def map_address(self, mapping):
    """
    Maps addresses to replacement addresses, having tor replace subsequent
    connections to the originals with the replacements.

    If an original address is a null address ('0.0.0.0', '::0', or '.') tor
    picks an original address itself and returns it in the reply. If an
    original address is already mapped to a different address the mapping is
    removed.

    :param dict mapping: mapping of original addresses to replacement addresses

    :raises:
      * :class:`stem.InvalidRequest` if the addresses are malformed
      * :class:`stem.OperationFailed` if Tor couldn't fulfill the request

    :returns: **dict** with 'original -> replacement' address mappings
    """
    mapaddress_arg = ' '.join(['%s=%s' % (k, v) for (k, v) in list(mapping.items())])
    response = self.msg('MAPADDRESS %s' % mapaddress_arg)
    stem.response.convert('MAPADDRESS', response)

    return response.entries
def drop_guards(self):
    """
    Drops our present guard nodes and picks a new set.

    .. versionadded:: 1.2.0

    :raises: :class:`stem.ControllerError` if Tor couldn't fulfill the request
    """
    if self.get_version() < stem.version.Requirement.DROPGUARDS:
        raise stem.UnsatisfiableRequest(message = 'DROPGUARDS was added in tor version %s' % stem.version.Requirement.DROPGUARDS)

    self.msg('DROPGUARDS')
def _post_authentication(self):
    # Post-authentication hook: re-attaches event listeners carried over from
    # a prior control connection, then claims ownership of the tor process if
    # we're configured as its owning controller.

    super(Controller, self)._post_authentication()

    # try to re-attach event listeners to the new instance

    with self._event_listeners_lock:
        try:
            failed_events = self._attach_listeners()[1]

            if failed_events:
                # remove our listeners for these so we don't keep failing
                for event_type in failed_events:
                    del self._event_listeners[event_type]

                logging_id = 'stem.controller.event_reattach-%s' % '-'.join(failed_events)
                log.log_once(logging_id, log.WARN, 'We were unable to re-attach our event listeners to the new tor instance for: %s' % ', '.join(failed_events))
        except stem.ProtocolError as exc:
            log.warn('Unable to issue the SETEVENTS request to re-attach our listeners (%s)' % exc)

    # issue TAKEOWNERSHIP if we're the owning process for this tor instance

    owning_pid = self.get_conf('__OwningControllerProcess', None)

    if owning_pid == str(os.getpid()) and self.is_localhost():
        response = self.msg('TAKEOWNERSHIP')
        stem.response.convert('SINGLELINE', response)

        if response.is_ok():
            # Now that tor is tracking our ownership of the process via the
            # control connection, we can stop having it check for us via our pid.

            try:
                self.reset_conf('__OwningControllerProcess')
            except stem.ControllerError as exc:
                log.warn("We were unable to reset tor's __OwningControllerProcess configuration. It will continue to periodically check if our pid exists. (%s)" % exc)
        else:
            log.warn('We were unable assert ownership of tor through TAKEOWNERSHIP, despite being configured to be the owning process through __OwningControllerProcess. (%s)' % response)
def _handle_event(self, event_message):
    """
    Converts the raw event message and dispatches it to every listener
    registered for its event type.
    """
    stem.response.convert('EVENT', event_message, arrived_at = time.time())

    with self._event_listeners_lock:
        for event_type, event_listeners in list(self._event_listeners.items()):
            if event_type == event_message.type:
                for listener in event_listeners:
                    listener(event_message)
def _attach_listeners(self):
    """
    Attempts to subscribe tor to the event types in self._event_listeners.
    This is a no-op if we're not currently authenticated.

    :returns: tuple of the form (set_events, failed_events)
    :raises: :class:`stem.ControllerError` if unable to make our request to tor
    """
    set_events, failed_events = [], []

    with self._event_listeners_lock:
        if self.is_authenticated():
            # try to set them all
            response = self.msg('SETEVENTS %s' % ' '.join(self._event_listeners.keys()))

            if response.is_ok():
                set_events = list(self._event_listeners.keys())
            else:
                # One of the following likely happened...
                #
                # * Our user attached listeners before having an authenticated
                #   connection, so we couldn't check if we met the version
                #   requirement.
                #
                # * User attached listeners to one tor instance, then connected
                #   us to an older tor instance.
                #
                # * Some other controller hiccup (far less likely).
                #
                # See if we can set some subset of our events.

                for event in list(self._event_listeners.keys()):
                    response = self.msg('SETEVENTS %s' % ' '.join(set_events + [event]))

                    if response.is_ok():
                        set_events.append(event)
                    else:
                        failed_events.append(event)

    return (set_events, failed_events)
def _parse_circ_path(path):
"""
Parses a circuit path as a list of **(fingerprint, nickname)** tuples. Tor
circuit paths are defined as being of the form...
::
Path = LongName *("," LongName)
LongName = Fingerprint [ ( "=" / "~" ) Nickname ]
example:
$999A226EBED397F331B612FE1E4CFAE5C1F201BA=piyaz
... *unless* this is prior to tor version 0.2.2.1 with the VERBOSE_NAMES
feature turned off (or before version 0.1.2.2 where the feature was
introduced). In that case either the fingerprint or nickname in the tuple
will be **None**, depending on which is missing.
::
Path = ServerID *("," ServerID)
ServerID = Nickname / Fingerprint
example:
$E57A476CD4DFBD99B4EE52A100A58610AD6E80B9,hamburgerphone,PrivacyRepublic14
:param str path: circuit path to be parsed
:returns: list of **(fingerprint, nickname)** tuples, fingerprints do not have a proceeding '$'
:raises: :class:`stem.ProtocolError` if the path is malformed
"""
if path:
try:
return [_parse_circ_entry(entry) for entry in path.split(',')]
except stem.ProtocolError as exc:
# include the path with the exception
raise stem.ProtocolError('%s: %s' % (exc, path))
else:
return []
def _parse_circ_entry(entry):
    """
    Parses a single relay's 'LongName' or 'ServerID'. See
    :func:`~stem.control._parse_circ_path` for more information.

    :param str entry: relay information to be parsed

    :returns: **(fingerprint, nickname)** tuple

    :raises: :class:`stem.ProtocolError` if the entry is malformed
    """
    if '=' in entry:
        # common case
        fingerprint, nickname = entry.split('=')
    elif '~' in entry:
        # allowed by the spec, but rarely seen in practice
        fingerprint, nickname = entry.split('~')
    elif entry[0] == '$':
        # old style, fingerprint only
        fingerprint, nickname = entry, None
    else:
        # old style, nickname only
        fingerprint, nickname = None, entry

    if fingerprint is not None:
        if not stem.util.tor_tools.is_valid_fingerprint(fingerprint, True):
            raise stem.ProtocolError('Fingerprint in the circuit path is malformed (%s)' % fingerprint)

        fingerprint = fingerprint[1:]  # strip off the leading '$'

    if nickname is not None and not stem.util.tor_tools.is_valid_nickname(nickname):
        raise stem.ProtocolError('Nickname in the circuit path is malformed (%s)' % nickname)

    return (fingerprint, nickname)
@with_default()
def _case_insensitive_lookup(entries, key, default = UNDEFINED):
    """
    Makes a case insensitive lookup within a list or dictionary, providing the
    first match encountered.

    :param list,dict entries: list or dictionary to be searched
    :param str key: entry or key value to look up
    :param object default: value to be returned if the key doesn't exist

    :returns: case insensitive match, or default if one was provided and the
      key wasn't found

    :raises: **ValueError** if no such value exists
    """
    if entries is not None:
        target = key.lower()

        if isinstance(entries, dict):
            for k, v in list(entries.items()):
                if k.lower() == target:
                    return v
        else:
            for entry in entries:
                if entry.lower() == target:
                    return entry

    raise ValueError("key '%s' doesn't exist in dict: %s" % (key, entries))
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/cosmos/azure-cosmos/test/test_encoding.py | 1 | 2872 | # -*- coding: utf-8 -*-
import unittest
import azure.cosmos.cosmos_client as cosmos_client
import uuid
import pytest
import test_config
pytestmark = pytest.mark.cosmosEmulator
@pytest.mark.usefixtures("teardown")
class EncodingTest(unittest.TestCase):
    """Test to ensure escaping of non-ascii characters from partition key"""

    host = test_config._test_config.host
    masterKey = test_config._test_config.masterKey
    connectionPolicy = test_config._test_config.connectionPolicy

    @classmethod
    def setUpClass(cls):
        # fail fast with a clear message when account credentials weren't filled in
        if (cls.masterKey == '[YOUR_KEY_HERE]' or
                cls.host == '[YOUR_ENDPOINT_HERE]'):
            raise Exception(
                "You must specify your Azure Cosmos account values for "
                "'masterKey' and 'host' at the top of this class to run the "
                "tests.")

        cls.client = cosmos_client.CosmosClient(cls.host, cls.masterKey, connection_policy=cls.connectionPolicy)
        cls.created_collection = test_config._test_config.create_multi_partition_collection_with_custom_pk_if_not_exist(cls.client)

    def test_unicode_characters_in_partition_key(self):
        # round-trip a document whose partition key mixes currency symbols,
        # Persian, and Devanagari text
        test_string = u'€€ کلید پارتیشن विभाजन कुंजी 123'
        document_definition = {'pk': test_string, 'id': 'myid' + str(uuid.uuid4())}
        created_doc = self.created_collection.create_item(body=document_definition)

        read_doc = self.created_collection.read_item(item=created_doc['id'], partition_key=test_string)
        self.assertEqual(read_doc['pk'], test_string)

    def test_create_document_with_line_separator_para_seperator_next_line_unicodes(self):
        # NOTE(review): the separator characters (U+2028 LINE SEPARATOR,
        # U+2029 PARAGRAPH SEPARATOR, U+0085 NEXT LINE) were mangled into
        # literal newlines in this copy of the file; reconstructed here as
        # escape sequences -- confirm against the upstream source.
        test_string = u'Line Separator (\u2028) & Paragraph Separator (\u2029) & Next Line (\u0085) & نیمفاصله'
        document_definition = {'pk': 'pk', 'id': 'myid' + str(uuid.uuid4()), 'unicode_content': test_string}
        created_doc = self.created_collection.create_item(body=document_definition)

        read_doc = self.created_collection.read_item(item=created_doc['id'], partition_key='pk')
        self.assertEqual(read_doc['unicode_content'], test_string)

    def test_create_stored_procedure_with_line_separator_para_seperator_next_line_unicodes(self):
        # same separator caveat as above
        test_string = 'Line Separator (\u2028) & Paragraph Separator (\u2029) & Next Line (\u0085) & نیمفاصله'
        test_string_unicode = u'Line Separator (\u2028) & Paragraph Separator (\u2029) & Next Line (\u0085) & نیمفاصله'

        stored_proc_definition = {'id': 'myid' + str(uuid.uuid4()), 'body': test_string}
        created_sp = self.created_collection.scripts.create_stored_procedure(body=stored_proc_definition)

        read_sp = self.created_collection.scripts.get_stored_procedure(created_sp['id'])
        self.assertEqual(read_sp['body'], test_string_unicode)
# allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| mit |
oleksa-pavlenko/gae-django-project-template | django/views/csrf.py | 107 | 4958 | from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Template
from django.utils.translation import ugettext as _
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with this inline templates as makemessages would not be
# able to discover the strings.
# Inline HTML template rendered by csrf_failure() below; kept as a string so
# the error page works regardless of template settings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href='http://docs.djangoproject.com/en/dev/ref/contrib/csrf/#ref-contrib-csrf'>Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function uses <a
href='http://docs.djangoproject.com/en/dev/ref/templates/api/#subclassing-context-requestcontext'><code>RequestContext</code></a>
for the template, instead of <code>Context</code>.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
"""
def csrf_failure(request, reason=""):
    """
    Default view used when request fails CSRF protection
    """
    from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
    # Build the translated, reason-specific context first, then render the
    # module-level CSRF_FAILURE_TEMPLATE with it.
    context = Context({
        'title': _("Forbidden"),
        'main': _("CSRF verification failed. Request aborted."),
        'reason': reason,
        'no_referer': reason == REASON_NO_REFERER,
        'no_referer1': _(
            "You are seeing this message because this HTTPS site requires a "
            "'Referer header' to be sent by your Web browser, but none was "
            "sent. This header is required for security reasons, to ensure "
            "that your browser is not being hijacked by third parties."),
        'no_referer2': _(
            "If you have configured your browser to disable 'Referer' headers, "
            "please re-enable them, at least for this site, or for HTTPS "
            "connections, or for 'same-origin' requests."),
        'no_cookie': reason == REASON_NO_CSRF_COOKIE,
        'no_cookie1': _(
            "You are seeing this message because this site requires a CSRF "
            "cookie when submitting forms. This cookie is required for "
            "security reasons, to ensure that your browser is not being "
            "hijacked by third parties."),
        'no_cookie2': _(
            "If you have configured your browser to disable cookies, please "
            "re-enable them, at least for this site, or for 'same-origin' "
            "requests."),
        'DEBUG': settings.DEBUG,
        'more': _("More information is available with DEBUG=True."),
    })
    template = Template(CSRF_FAILURE_TEMPLATE)
    return HttpResponseForbidden(template.render(context), content_type='text/html')
| mit |
SaschaMester/delicium | third_party/pycoverage/coverage/xmlreport.py | 159 | 5809 | """XML reporting for coverage.py"""
import os, sys, time
import xml.dom.minidom
from coverage import __url__, __version__
from coverage.backward import sorted, rpartition # pylint: disable=W0622
from coverage.report import Reporter
def rate(hit, num):
    """Return the fraction of `hit`/`num`, as a string."""
    # Guard against a zero denominator: an empty package/class reports 0.
    divisor = num or 1.0
    return "%.4g" % (float(hit) / divisor)
class XmlReporter(Reporter):
    """A reporter for writing Cobertura-style XML coverage results.

    Builds an xml.dom.minidom document in memory via report(), delegating
    per-file work to xml_file(), then pretty-prints it to the output file.
    """
    def __init__(self, coverage, config):
        super(XmlReporter, self).__init__(coverage, config)
        # pkg_name -> [class_elts dict, line hits, line count, branch hits,
        # branch count]; created in report() and filled in by xml_file().
        self.packages = None
        # The minidom Document under construction; created in report().
        self.xml_out = None
        # True when branch (arc) data was collected, enabling branch rates.
        self.arcs = coverage.data.has_arcs()
    def report(self, morfs, outfile=None):
        """Generate a Cobertura-compatible XML report for `morfs`.
        `morfs` is a list of modules or filenames.
        `outfile` is a file object to write the XML to.
        Returns the combined line+branch coverage as a percentage (float).
        """
        # Initial setup.
        outfile = outfile or sys.stdout
        # Create the DOM that will store the data.
        impl = xml.dom.minidom.getDOMImplementation()
        docType = impl.createDocumentType(
            "coverage", None,
            "http://cobertura.sourceforge.net/xml/coverage-03.dtd"
            )
        self.xml_out = impl.createDocument(None, "coverage", docType)
        # Write header stuff.
        xcoverage = self.xml_out.documentElement
        xcoverage.setAttribute("version", __version__)
        # Cobertura expects a millisecond-resolution epoch timestamp.
        xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
        xcoverage.appendChild(self.xml_out.createComment(
            " Generated by coverage.py: %s " % __url__
            ))
        xpackages = self.xml_out.createElement("packages")
        xcoverage.appendChild(xpackages)
        # Call xml_file for each file in the data.
        self.packages = {}
        self.report_files(self.xml_file, morfs)
        # Running totals across all packages, for the document-level rates.
        lnum_tot, lhits_tot = 0, 0
        bnum_tot, bhits_tot = 0, 0
        # Populate the XML DOM with the package info.
        for pkg_name in sorted(self.packages.keys()):
            pkg_data = self.packages[pkg_name]
            class_elts, lhits, lnum, bhits, bnum = pkg_data
            xpackage = self.xml_out.createElement("package")
            xpackages.appendChild(xpackage)
            xclasses = self.xml_out.createElement("classes")
            xpackage.appendChild(xclasses)
            # Classes are emitted in sorted order for deterministic output.
            for class_name in sorted(class_elts.keys()):
                xclasses.appendChild(class_elts[class_name])
            # Package names are directory paths; Cobertura uses dotted names.
            xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
            xpackage.setAttribute("line-rate", rate(lhits, lnum))
            xpackage.setAttribute("branch-rate", rate(bhits, bnum))
            xpackage.setAttribute("complexity", "0")
            lnum_tot += lnum
            lhits_tot += lhits
            bnum_tot += bnum
            bhits_tot += bhits
        xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
        xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
        # Use the DOM to write the output file.
        outfile.write(self.xml_out.toprettyxml())
        # Return the total percentage.
        denom = lnum_tot + bnum_tot
        if denom == 0:
            pct = 0.0
        else:
            pct = 100.0 * (lhits_tot + bhits_tot) / denom
        return pct
    def xml_file(self, cu, analysis):
        """Add to the XML report for a single file.

        `cu` is the code unit; `analysis` carries its statement/branch data.
        Builds a <class> element and accumulates it into self.packages.
        """
        # Create the 'lines' and 'package' XML elements, which
        # are populated later.  Note that a package == a directory.
        package_name = rpartition(cu.name, ".")[0]
        className = cu.name
        package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
        xclass = self.xml_out.createElement("class")
        # Cobertura requires a <methods> element even though we don't fill it.
        xclass.appendChild(self.xml_out.createElement("methods"))
        xlines = self.xml_out.createElement("lines")
        xclass.appendChild(xlines)
        xclass.setAttribute("name", className)
        filename = cu.file_locator.relative_filename(cu.filename)
        # Normalize Windows separators so paths are portable in the report.
        xclass.setAttribute("filename", filename.replace("\\", "/"))
        xclass.setAttribute("complexity", "0")
        branch_stats = analysis.branch_stats()
        # For each statement, create an XML 'line' element.
        for line in sorted(analysis.statements):
            xline = self.xml_out.createElement("line")
            xline.setAttribute("number", str(line))
            # Q: can we get info about the number of times a statement is
            # executed?  If so, that should be recorded here.
            xline.setAttribute("hits", str(int(line not in analysis.missing)))
            if self.arcs:
                if line in branch_stats:
                    total, taken = branch_stats[line]
                    xline.setAttribute("branch", "true")
                    xline.setAttribute("condition-coverage",
                        "%d%% (%d/%d)" % (100*taken/total, taken, total)
                        )
            xlines.appendChild(xline)
        class_lines = len(analysis.statements)
        class_hits = class_lines - len(analysis.missing)
        if self.arcs:
            # t = total branches at a line, k = branches taken.
            class_branches = sum([t for t,k in branch_stats.values()])
            missing_branches = sum([t-k for t,k in branch_stats.values()])
            class_br_hits = class_branches - missing_branches
        else:
            class_branches = 0.0
            class_br_hits = 0.0
        # Finalize the statistics that are collected in the XML DOM.
        xclass.setAttribute("line-rate", rate(class_hits, class_lines))
        xclass.setAttribute("branch-rate", rate(class_br_hits, class_branches))
        package[0][className] = xclass
        package[1] += class_hits
        package[2] += class_lines
        package[3] += class_br_hits
        package[4] += class_branches
| bsd-3-clause |
Nikoala/CouchPotatoServer | libs/pyutil/increasing_timer.py | 106 | 6607 | # Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
This module was invented when it was discovered that time.time() can return
decreasing answers, which was causing scheduled tasks to get executed out of
order. See python bug report `[ #447945 ] time.time() is not
non-decreasing',
http://sourceforge.net/tracker/index.php?func=detail&aid=447945&group_id=5470&atid=105470
http://mail.python.org/pipermail/python-list/2001-August/thread.html#58296
After posting that bug report, I figured out that this isn't really a bug,
but a misunderstanding about the semantics of gettimeofday(). gettimeofday()
relies on the hardware clock, which is supposed to reflect the "real" time
i.e. the position and orientation of our planet with regard to our sun. But
the hardware clock gets adjusted, either for skew (because hardware clocks
always run a little faster or a little slower than they ought), or in order to
sync up with another clock e.g. through NTP. So it isn't really a bug in the
underlying platform (except perhaps a bug in the lack of a prominent warning
in the documentation), but if you depend on a monotonically increasing
timestamps, you need to use IncreasingTimer.time() instead of the Python
standard library's time.time(). --Zooko 2001-08-04
"""
import time as standardtime
# Here is a global reference to an IncreasingTimer.
# This singleton global IncreasingTimer instance gets created at module load time.
timer = None
class IncreasingTimer:
    """A clock whose time() is strictly monotonically increasing.

    Wraps standardtime.time(), compensating (via self.delta) whenever the
    underlying clock stands still or moves backwards, so that every call to
    time() returns a strictly larger float than the previous call.
    """
    def __init__(self, inittime=None):
        """
        @param inittime starting time (in seconds) or None in which case it
            will be initialized to standardtime.time()
        """
        if inittime is None:
            inittime = standardtime.time()
        self.lasttime = inittime # This stores the most recent answer that we returned from time().
        self.delta = 0 # We add this to the result from the underlying standardtime.time().
        # How big of an increment do we need to add in order to make the new float greater than the old float?
        # Halve a trial increment until adding it no longer changes the
        # float; the last effective value is the smallest usable epsilon.
        trye = 1.0
        while (self.lasttime + trye) > self.lasttime:
            olde = trye
            trye = trye / 2.0
        self._EPSILON = olde
    def time(self):
        """
        This returns the current time as a float, with as much precision as
        the underlying Python interpreter can muster. In addition, successive
        calls to time() always return bigger numbers. (standardtime.time()
        can sometimes return the same or even a *smaller* number!)
        On the other hand, calling time() is a bit slower than calling
        standardtime.time(), so you might want to avoid it inside tight loops
        and deal with decreasing or identical answers yourself.
        Now by definition you cannot "reset" this clock to an earlier state.
        This means that if you start a Python interpreter and instantiate an
        IncreasingTimer, and then you subsequently realize that your
        computer's clock was set to next year, and you set it back to the
        correct year, that subsequent calls to standardtime.time() will return
        a number indicating this year and IncreasingTimer.time() will continue
        to return a number indicating next year.  Therefore, you should use
        the answers from IncreasingTimer.time() in such a way that the only
        things you depend on are correctness in the relative *order* of two
        times, (and, with the following caveat, the relative *difference*
        between two times as well), not the global "correctness" of the times
        with respect to the rest of the world.
        The caveat is that if the underlying answers from standardtime.time()
        jump *forward*, then this *does* distort the relative difference
        between two answers from IncreasingTimer.time().  What
        IncreasingTimer.time() does is if the underlying clock goes
        *backwards*, then IncreasingTimer.time() still returns successively
        higher numbers.  Then if the underlying clock jumps *forwards*,
        IncreasingTimer.time() also jumps forward the same amount.  A weird
        consequence of this is that if you were to set your system clock to
        point to 10 years ago, and call:
        t1 = increasingtimer.time()
        and then set your system clock back to the present, and call:
        t2 = increasingtimer.time()
        , then there would be a 10-year difference between t2 and t1.
        In practice, adjustments to the underlying system time are rarely that
        drastic, and for some systems (e.g. Mnet's DoQ, for which this module
        was invented) it doesn't matter anyway if time jumps forward.
        Another note: Brian Warner has pointed out that there is another
        caveat, which is due to there being a delay between successive calls
        to IncreasingTimer.time().  When the underlying clock jumps backward,
        then events which were scheduled before the jump and scheduled to go
        off after the jump may be delayed by at most d, where d is the delay
        between the two successive calls to IncreasingTimer which spanned the
        jump.
        @singlethreaded You must guarantee that you never have more than one
            thread in this function at a time.
        """
        t = standardtime.time() + self.delta
        lasttime = self.lasttime
        if t <= lasttime:
            # Clock stood still or went backwards: absorb the difference
            # (plus one epsilon) into delta so t strictly advances.
            self.delta = self.delta + (lasttime - t) + self._EPSILON
            t = lasttime + self._EPSILON
        # TODO: If you were sure that you could generate a bigger float in one
        # pass, you could change this `while' to an `if' and optimize out a
        # test.
        while t <= lasttime:
            # We can get into here only if self._EPSILON is too small to make
            # the time float "tick over" to a new higher value (epsilon was
            # computed at init time for a possibly smaller float).  So we
            # (permanently) double self._EPSILON until it is big enough.
            # TODO: Is doubling epsilon the best way to quickly get a
            # minimally bigger float?
            self._EPSILON = self._EPSILON * 2.0
            # Delta, having smaller magnitude than t, can be incremented by
            # more than t was incremented.  (Up to the old epsilon more.)
            # That's OK.
            self.delta = self.delta + self._EPSILON
            t = t + self._EPSILON
        self.lasttime = t
        return t
# create the global IncreasingTimer instance and `time' function
timer = IncreasingTimer()
time = timer.time
| gpl-3.0 |
patjak/linux-stable | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Per-thread bookkeeping for threads currently blocked in FUTEX_WAIT,
# plus long-lived aggregate stats.  (The original initialized
# process_names twice; the duplicate assignment is removed.)
thread_thislock = {}   # tid -> futex address the thread is waiting on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread began waiting
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record when a thread enters FUTEX_WAIT so the exit hook can time it."""
    # Only futex-wait operations represent blocking; we don't care about
    # originators of WAKE events.
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        return
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On return from a futex wait, accumulate how long the thread was blocked."""
    # dict.has_key() was removed in Python 3; the `in` operator is the
    # equivalent on both Python 2 and Python 3.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Called by perf before event processing starts."""
    # print() with a single argument is valid on both Python 2 (as a
    # parenthesized expression) and Python 3; the bare print statement
    # is a syntax error on Python 3.
    print("Press control+C to stop and show the summary")
def trace_end():
    """Called by perf at exit: print per-(thread, lock) contention stats."""
    for (tid, lock) in lock_waits:
        # Renamed from min/max: those names shadowed the builtins.
        wait_min, wait_max, avg, count = lock_waits[tid, lock]
        # print() form is valid on both Python 2 and Python 3.
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
cafe-grader-team/cafe-grader-web | lib/assets/Lib/encodings/cp1252.py | 37 | 13818 | """ Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1252 codec delegating to the C-level charmap helpers."""

    def encode(self, input, errors='strict'):
        # charmap_encode returns the (bytes, items-consumed) pair the
        # codecs.Codec interface requires.
        encoded, consumed = codecs.charmap_encode(input, errors, encoding_table)
        return encoded, consumed

    def decode(self, input, errors='strict'):
        decoded, consumed = codecs.charmap_decode(input, errors, decoding_table)
        return decoded, consumed
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp1252 encoder; the charmap codec needs no state between calls."""

    def encode(self, input, final=False):
        # Only the encoded bytes are returned; the consumed-length part of
        # the charmap_encode result is discarded.
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp1252 decoder; the charmap codec needs no state between calls."""

    def decode(self, input, final=False):
        # Only the decoded text is returned; the consumed-length part of
        # the charmap_decode result is discarded.
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    """cp1252 stream writer: encode() comes from Codec, buffering from codecs.StreamWriter."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """cp1252 stream reader: decode() comes from Codec, buffering from codecs.StreamReader."""
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo record that registers the cp1252 encoding."""
    # The Codec class is stateless, so one instance can supply both the
    # encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1252',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\ufffe' # 0x8D -> UNDEFINED
'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\ufffe' # 0x9D -> UNDEFINED
'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
guokeno0/vitess | third_party/py/bson-0.3.2/bson/__init__.py | 36 | 3943 | #!/usr/bin/python -OOOO
# vim: set fileencoding=utf8 shiftwidth=4 tabstop=4 textwidth=80 foldmethod=marker :
# Copyright (c) 2010, Kou Man Tong. All rights reserved.
# For licensing, see LICENSE file included in the package.
"""
BSON serialization and deserialization logic.
Specifications taken from: http://bsonspec.org/#/specification
The following types are unsupported, because for data exchange purposes, they're
over-engineered:
0x06 (Undefined)
0x07 (ObjectId)
0x0b (Regex - Exactly which flavor do you want? Better let higher level
programmers make that decision.)
0x0c (DBPointer)
0x0d (JavaScript code)
0x0e (Symbol)
0x0f (JS w/ scope)
0x11 (MongoDB-specific timestamp)
For binaries, only the default 0x0 type is supported.
>>> a = {
... u"Item A" : u"String item A",
... u"Item D" : {u"ROFLOL" : u"Blah blah blah"},
... u"Item C" : [1, 123456789012345, None, "Party and Bad Romance"],
... u"Item B" : u"\u4e00\u9580\u4e94\u5091"
... }
>>> def sorted(obj, dfs_stack):
... keys = obj.keys()
... keys.sort()
... for i in keys: yield i
...
>>> def reverse(obj, dfs_stack):
... keys = obj.keys()
... keys.sort(reverse = True)
... for i in keys: yield i
...
>>> serialized = dumps(a, sorted)
>>> serialized
'\\x9f\\x00\\x00\\x00\\x02Item A\\x00\\x0e\\x00\\x00\\x00String item A\\x00\\x02Item B\\x00\\r\\x00\\x00\\x00\\xe4\\xb8\\x80\\xe9\\x96\\x80\\xe4\\xba\\x94\\xe5\\x82\\x91\\x00\\x04Item C\\x007\\x00\\x00\\x00\\x100\\x00\\x01\\x00\\x00\\x00\\x121\\x00y\\xdf\\r\\x86Hp\\x00\\x00\\n2\\x00\\x053\\x00\\x15\\x00\\x00\\x00\\x00Party and Bad Romance\\x00\\x03Item D\\x00 \\x00\\x00\\x00\\x02ROFLOL\\x00\\x0f\\x00\\x00\\x00Blah blah blah\\x00\\x00\\x00'
>>>
>>> b = loads(serialized)
>>> b
{u'Item C': [1, 123456789012345, None, 'Party and Bad Romance'], u'Item B': u'\\u4e00\\u9580\\u4e94\\u5091', u'Item A': u'String item A', u'Item D': {u'ROFLOL': u'Blah blah blah'}}
>>> reverse_serialized = dumps(a, reverse)
>>> reverse_serialized
'\\x9f\\x00\\x00\\x00\\x03Item D\\x00 \\x00\\x00\\x00\\x02ROFLOL\\x00\\x0f\\x00\\x00\\x00Blah blah blah\\x00\\x00\\x04Item C\\x007\\x00\\x00\\x00\\x100\\x00\\x01\\x00\\x00\\x00\\x121\\x00y\\xdf\\r\\x86Hp\\x00\\x00\\n2\\x00\\x053\\x00\\x15\\x00\\x00\\x00\\x00Party and Bad Romance\\x00\\x02Item B\\x00\\r\\x00\\x00\\x00\\xe4\\xb8\\x80\\xe9\\x96\\x80\\xe4\\xba\\x94\\xe5\\x82\\x91\\x00\\x02Item A\\x00\\x0e\\x00\\x00\\x00String item A\\x00\\x00'
>>> c = loads(reverse_serialized)
>>> c
{u'Item C': [1, 123456789012345, None, 'Party and Bad Romance'], u'Item B': u'\\u4e00\\u9580\\u4e94\\u5091', u'Item A': u'String item A', u'Item D': {u'ROFLOL': u'Blah blah blah'}}
"""
from codec import *
import network
__all__ = ["loads", "dumps"]
# {{{ Serialization and Deserialization
def dumps(obj, generator=None):
    """
    Given a dict, outputs a BSON string.
    generator is an optional function which accepts the dictionary/array being
    encoded, the current DFS traversal stack, and outputs an iterator indicating
    the correct encoding order for keys.
    """
    # BSONCoding instances know their own serialization; plain dicts go
    # through the generic document encoder.
    encoder = encode_object if isinstance(obj, BSONCoding) else encode_document
    return encoder(obj, [], generator_func=generator)
def loads(data):
    """
    Given a BSON string, outputs a dict.
    """
    # decode_document returns (next-offset, document); only the document
    # is of interest here.
    _offset, document = decode_document(data, 0)
    return document
# }}}
# {{{ Socket Patchers
def patch_socket():
    """
    Patches the Python socket class such that sockets can send and receive BSON
    objects atomically.
    This adds the following functions to socket:
    recvbytes(bytes_needed, sock_buf = None) - reads bytes_needed bytes
        atomically. Returns None if socket closed.
    recvobj() - reads a BSON document from the socket atomically and returns
        the deserialized dictionary. Returns None if socket closed.
    sendobj(obj) - sends a BSON document to the socket atomically.
    """
    from socket import socket
    # Monkey-patch the three helpers from the network module onto the
    # socket class itself.
    for attr_name, func in (("recvbytes", network._recvbytes),
                            ("recvobj", network._recvobj),
                            ("sendobj", network._sendobj)):
        setattr(socket, attr_name, func)
| bsd-3-clause |
digdoritos/gimp | plug-ins/pygimp/plug-ins/palette-sort.py | 5 | 12031 | #!/usr/bin/env python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gimpfu import *
from colorsys import rgb_to_yiq
from random import randint
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
AVAILABLE_CHANNELS = (_("Red"), _("Green"), _("Blue"),
_("Luma (Y)"),
_("Hue"), _("Saturation"), _("Value"),
_("Saturation (HSL)"), _("Lightness (HSL)"),
_("Index"),
_("Random"))
GRAIN_SCALE = (1.0, 1.0 , 1.0,
1.0,
360., 100., 100.,
100., 100.,
16384.,
float(0x7ffffff),
100., 256., 256.,
256., 360.,)
SELECT_ALL = 0
SELECT_SLICE = 1
SELECT_AUTOSLICE = 2
SELECT_PARTITIONED = 3
SELECTIONS = (SELECT_ALL, SELECT_SLICE, SELECT_AUTOSLICE, SELECT_PARTITIONED)
def noop(v, i):
    """Identity channel getter: return the color value unchanged."""
    return v
def to_hsv(v, i):
    """Channel getter: convert the color object to its HSV representation."""
    return v.to_hsv()
def to_hsl(v, i):
    """Channel getter: convert the color object to its HSL representation."""
    return v.to_hsl()
def to_yiq(v, i):
    """Channel getter: convert RGB (alpha dropped from v) to a YIQ triple."""
    # v[:-1] strips the trailing alpha component before conversion.
    r, g, b = v[:-1]
    return rgb_to_yiq(r, g, b)
def to_index(v, i):
    """Channel getter: sort key is the palette index itself (ignores the color)."""
    return (i,)
def to_random(v, i):
    """Channel getter: a uniformly random non-negative 31-bit sort key."""
    return (randint(0, 0x7fffffff),)
channel_getters = [ (noop, 0), (noop, 1), (noop, 2),
(to_yiq, 0),
(to_hsv, 0), (to_hsv, 1), (to_hsv, 2),
(to_hsl, 1), (to_hsl, 2),
(to_index, 0),
(to_random, 0)]
try:
from colormath.color_objects import RGBColor, LabColor, LCHabColor
AVAILABLE_CHANNELS = AVAILABLE_CHANNELS + (_("Lightness (LAB)"), _("A-color"), _("B-color"),
_("Chroma (LCHab)"), _("Hue (LCHab)"))
to_lab = lambda v,i: RGBColor(*v[:-1]).convert_to('LAB').get_value_tuple()
to_lchab = lambda v,i: RGBColor(*v[:-1]).convert_to('LCHab').get_value_tuple()
channel_getters.extend([(to_lab, 0), (to_lab, 1), (to_lab, 2),
(to_lchab, 1), (to_lchab, 2)])
except ImportError:
pass
def parse_slice(s, numcolors):
    """Parse a slice spec and return (start, nrows, length)
    All items are optional. Omitting them makes the largest possible selection that
    exactly fits the other items.
    start:nrows,length
    '' selects all items, as does ':'
    ':4,' makes a 4-row selection out of all colors (length auto-determined)
    ':4' also.
    ':1,4' selects the first 4 colors
    ':,4' selects rows of 4 colors (nrows auto-determined)
    ':4,4' selects 4 rows of 4 colors
    '4:' selects a single row of all colors after 4, inclusive.
    '4:,4' selects rows of 4 colors, starting at 4 (nrows auto-determined)
    '4:4,4' selects 4 rows of 4 colors (16 colors total), beginning at index 4.
    '4' is illegal (ambiguous)
    In general, slices are comparable to a numpy sub-array.
    'start at element START, with shape (NROWS, LENGTH)'
    """
    s = s.strip()
    def notunderstood():
        # Single error path for every malformed spec.
        raise ValueError('Slice %r not understood. Should be in format'
            ' START?:NROWS?,ROWLENGTH? eg. "0:4,16".' % s)
    def _int(v):
        try:
            return int(v)
        except ValueError:
            notunderstood()
    if s in ('', ':', ':,'):
        return 0, 1, numcolors # entire palette, one row
    if s.count(':') != 1:
        notunderstood()
    rowpos = s.find(':')
    start = 0
    if rowpos > 0:
        start = _int(s[:rowpos])
        numcolors -= start  # remaining colors after the start offset
    nrows = 1
    if ',' in s:
        commapos = s.find(',')
        nrows = s[rowpos+1:commapos]
        length = s[commapos+1:]
        if not nrows:
            # nrows omitted: derive it from the row length.
            if not length:
                notunderstood()
            else:
                length = _int(length)
                if length == 0:
                    notunderstood()
                nrows = numcolors // length
                # Negative nrows flags an uneven division.
                if numcolors % length:
                    nrows = -nrows
        elif not length:
            # length omitted: derive it from the row count.
            nrows = _int(nrows)
            if nrows == 0:
                notunderstood()
            length = numcolors // nrows
            if numcolors % nrows:
                length = -length
        else:
            # Both given explicitly.
            nrows = _int(nrows)
            if nrows == 0:
                notunderstood()
            length = _int(length)
            if length == 0:
                notunderstood()
    else:
        tail = s[rowpos+1:]
        if not tail:
            # BUG FIX: 'START:' (nothing after the colon) used to fall
            # through to _int('') and raise, contradicting the docstring
            # ("'4:' selects a single row of all colors after 4").  It
            # means one row of everything remaining.
            length = numcolors
        else:
            nrows = _int(tail)
            if nrows == 0:
                notunderstood()
            length = numcolors // nrows
            if numcolors % nrows:
                length = -length
    return start, nrows, length
def quantization_grain(channel, g):
    "Given a channel and a quantization, return the size of a quantization grain"
    clamped = max(1.0, g)
    if clamped <= 1.0:
        # No quantization requested: a tiny grain makes rounding a no-op.
        return 0.00001
    # Scale the grain to the channel's value range, never letting it
    # collapse to zero.
    return max(0.00001, GRAIN_SCALE[channel] / clamped)
def palette_sort (palette, selection, slice_expr, channel, quantize,
                  ascending, pchannel, pquantize):
    """Sort the entries of a GIMP palette and return the sorted palette.

    If `palette` is read-only it is duplicated first, so callers must use
    the returned palette name.  `selection` is one of the SELECT_* modes;
    `slice_expr` is parsed by parse_slice() for the slice-based modes.
    """
    grain = quantization_grain(channel, quantize)
    pgrain = quantization_grain(pchannel, pquantize)

    # If palette is read only, work on a copy:
    editable = pdb.gimp_palette_is_editable(palette)
    if not editable:
        palette = pdb.gimp_palette_duplicate (palette)

    num_colors = pdb.gimp_palette_get_info (palette)

    start, nrows, length = None, None, None
    if selection == SELECT_AUTOSLICE:
        def find_index(color, startindex=0):
            # Index of the first entry matching `color` at or after
            # `startindex`, or None when absent.
            for i in range(startindex, num_colors):
                c = pdb.gimp_palette_entry_get_color (palette, i)
                if c == color:
                    return i
            return None
        def hexcolor(c):
            return "#%02x%02x%02x" % tuple(c[:-1])
        fg = pdb.gimp_context_get_foreground()
        bg = pdb.gimp_context_get_background()
        start = find_index(fg)
        end = find_index(bg)
        if start is None:
            raise ValueError("Couldn't find foreground color %r in palette" % list(fg))
        if end is None:
            raise ValueError("Couldn't find background color %r in palette" % list(bg))
        if find_index(fg, start + 1):
            raise ValueError('Autoslice cannot be used when more than one'
                             ' instance of an endpoint'
                             ' (%s) is present' % hexcolor(fg))
        if find_index(bg, end + 1):
            raise ValueError('Autoslice cannot be used when more than one'
                             ' instance of an endpoint'
                             ' (%s) is present' % hexcolor(bg))
        if start > end:
            end, start = start, end
        length = (end - start) + 1
        try:
            _, nrows, _ = parse_slice(slice_expr, length)
            nrows = abs(nrows)
            if length % nrows:
                raise ValueError('Total length %d not evenly divisible'
                                 ' by number of rows %d' % (length, nrows))
            # Floor division keeps `length` an integer under Python 3
            # semantics as well (was `length /= nrows`, which produces a
            # float with true division).
            length //= nrows
        except ValueError:
            # bad expression is okay here, just assume one row
            nrows = 1
        # remaining behaviour is implemented by SELECT_SLICE 'inheritance'.
        selection = SELECT_SLICE
    elif selection in (SELECT_SLICE, SELECT_PARTITIONED):
        start, nrows, length = parse_slice(slice_expr, num_colors)

    channels_getter, channel_index = channel_getters[channel]
    def get_colors (start, end):
        # Pair each palette entry with its (quantized) sort key.
        result = []
        for i in range (start, end):
            entry = (pdb.gimp_palette_entry_get_name (palette, i),
                     pdb.gimp_palette_entry_get_color (palette, i))
            index = channels_getter(entry[1], i)[channel_index]
            index = (index - (index % grain))
            result.append((index, entry))
        return result

    if selection == SELECT_ALL:
        entry_list = get_colors(0, num_colors)
        entry_list.sort(key=lambda v: v[0], reverse=not ascending)
        for i in range(num_colors):
            pdb.gimp_palette_entry_set_name (palette, i, entry_list[i][1][0])
            pdb.gimp_palette_entry_set_color (palette, i, entry_list[i][1][1])
    elif selection == SELECT_PARTITIONED:
        # NOTE(review): the `- 1` in this bounds check looks suspiciously
        # off-by-one (it accepts num_colors == start + length*nrows - 1);
        # preserved as-is pending confirmation.
        if num_colors < (start + length * nrows) - 1:
            raise ValueError('Not enough entries in palette to sort complete rows!'
                             ' Got %d, expected >=%d' % (num_colors, start + length * nrows))
        pchannels_getter, pchannel_index = channel_getters[pchannel]
        for row in range(nrows):
            # Collect run lengths of consecutive entries sharing the same
            # (quantized) partitioning-channel value.
            partition_spans = [1]
            rowstart = start + (row * length)
            old_color = pdb.gimp_palette_entry_get_color (palette,
                                                          rowstart)
            old_partition = pchannels_getter(old_color, rowstart)[pchannel_index]
            old_partition = old_partition - (old_partition % pgrain)
            for i in range(rowstart + 1, rowstart + length):
                this_color = pdb.gimp_palette_entry_get_color (palette, i)
                this_partition = pchannels_getter(this_color, i)[pchannel_index]
                this_partition = this_partition - (this_partition % pgrain)
                if this_partition == old_partition:
                    partition_spans[-1] += 1
                else:
                    partition_spans.append(1)
                    old_partition = this_partition
            # Sort each partition independently via a recursive slice sort.
            base = rowstart
            for size in partition_spans:
                palette_sort(palette, SELECT_SLICE, '%d:1,%d' % (base, size),
                             channel, quantize, ascending, 0, 1.0)
                base += size
    else:
        stride = length
        if num_colors < (start + stride * nrows) - 1:
            raise ValueError('Not enough entries in palette to sort complete rows!'
                             ' Got %d, expected >=%d' % (num_colors, start + stride * nrows))
        for row_start in range(start, start + stride * nrows, stride):
            sublist = get_colors(row_start, row_start + stride)
            sublist.sort(key=lambda v: v[0], reverse=not ascending)
            for i, entry in zip(range(row_start, row_start + stride), sublist):
                pdb.gimp_palette_entry_set_name (palette, i, entry[1][0])
                pdb.gimp_palette_entry_set_color (palette, i, entry[1][1])
    return palette
# Register the plug-in with GIMP's procedure database so it shows up in the
# Palettes menu; GIMP invokes palette_sort() with the parameters declared
# in the list below.
register(
    "python-fu-palette-sort",
    N_("Sort the colors in a palette"),
    "palette_sort (palette, selection, slice_expr, channel, quantize,"
    " ascending, pchannel, pquantize) -> new_palette",
    "Joao S. O. Bueno Calligaris, Carol Spears, David Gowers",
    "Joao S. O. Bueno Calligaris",
    "2006",
    N_("_Sort Palette..."),
    "",
    [
        (PF_PALETTE, "palette", _("Palette"), ""),
        (PF_OPTION, "selections", _("Se_lections"), SELECT_ALL,
         (_("All"), _("Slice / Array"), _("Autoslice (fg->bg)"), _("Partitioned"))),
        (PF_STRING, "slice-expr", _("Slice _expression"), ''),
        (PF_OPTION, "channel", _("Channel to _sort"), 3,
         AVAILABLE_CHANNELS),
        (PF_FLOAT, "quantize", _("_Quantization"), 0.0),
        (PF_BOOL, "ascending", _("_Ascending"), True),
        (PF_OPTION, "pchannel", _("_Partitioning channel"), 3,
         AVAILABLE_CHANNELS),
        (PF_FLOAT, "pquantize", _("Partition q_uantization"), 0.0),
    ],
    [],
    palette_sort,
    menu="<Palettes>",
    domain=("gimp20-python", gimp.locale_directory)
)
main ()
| gpl-3.0 |
alizamus/pox_controller | pox/proto/dns_spy.py | 45 | 4072 | # Copyright 2011-2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This component spies on DNS replies, stores the results, and raises events
when things are looked up or when its stored mappings are updated.
Similar to NOX's DNSSpy component, but with more features.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
import pox.lib.packet.dns as pkt_dns
from pox.lib.addresses import IPAddr
from pox.lib.revent import *
log = core.getLogger()
class DNSUpdate (Event):
  """Event raised when a stored DNS mapping is added or changed."""
  def __init__ (self, item):
    # The unbound base initializer must be passed the instance explicitly;
    # the previous Event.__init__() call raised TypeError when fired.
    Event.__init__(self)
    self.item = item
class DNSLookup (Event):
  """Event raised for each DNS question seen in a sniffed reply.

  A boolean attribute is set for every known rr type name; the one matching
  the question's qtype is True (or OTHER when the qtype is unknown).
  """
  def __init__ (self, rr):
    # The unbound base initializer must be passed the instance explicitly;
    # the previous Event.__init__() call raised TypeError when fired.
    Event.__init__(self)

    self.name = rr.name
    self.qtype = rr.qtype

    self.rr = rr
    for t in pkt_dns.rrtype_to_str.values():
      setattr(self, t, False)
    t = pkt_dns.rrtype_to_str.get(rr.qtype)
    if t is not None:
      setattr(self, t, True)
      setattr(self, "OTHER", False)
    else:
      setattr(self, "OTHER", True)
class DNSSpy (EventMixin):
  """Sniffs DNS replies, remembers name<->address mappings, and raises
  DNSLookup / DNSUpdate events as questions and answers are observed.
  """
  _eventMixin_events = set([ DNSUpdate, DNSLookup ])

  def __init__ (self, install_flow = True):
    # When True, each switch gets a flow sending DNS server replies
    # (UDP source port 53) to the controller.
    self._install_flow = install_flow

    self.ip_to_name = {}   # IPAddr -> list of names (most recent first)
    self.name_to_ip = {}   # name -> list of IPAddrs (most recent first)
    self.cname = {}        # name -> list of canonical names (most recent first)

    core.openflow.addListeners(self)

    # Add handy function to console
    core.Interactive.variables['lookup'] = self.lookup

  def _handle_ConnectionUp (self, event):
    if self._install_flow:
      msg = of.ofp_flow_mod()
      msg.match = of.ofp_match()
      msg.match.dl_type = pkt.ethernet.IP_TYPE
      msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL
      msg.match.tp_src = 53
      msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
      event.connection.send(msg)

  def lookup (self, something):
    """Resolve a name or address using the recorded mappings."""
    if something in self.name_to_ip:
      return self.name_to_ip[something]
    if something in self.cname:
      # Follow the most recent canonical name.  (Recursing with the whole
      # list, as before, raised TypeError since lists are unhashable.)
      return self.lookup(self.cname[something][0])
    try:
      return self.ip_to_name.get(IPAddr(something))
    except Exception:
      # `something` is not a parseable IP address.
      return None

  def _record (self, ip, name):
    # Handle reverse lookups correctly?
    modified = False
    val = self.ip_to_name.setdefault(ip, [])
    if name not in val:
      val.insert(0, name)
      modified = True
    val = self.name_to_ip.setdefault(name, [])
    if ip not in val:
      val.insert(0, ip)
      modified = True
    return modified

  def _record_cname (self, name, cname):
    modified = False
    val = self.cname.setdefault(name, [])
    # Check for the *cname* we are about to insert; the previous test of
    # `name not in val` never matched, so duplicates accumulated and the
    # method always reported a modification.
    if cname not in val:
      val.insert(0, cname)
      modified = True
    return modified

  def _handle_PacketIn (self, event):
    p = event.parsed.find('dns')

    if p is not None and p.parsed:
      log.debug(p)

      for q in p.questions:
        if q.qclass != 1: continue # Internet only
        self.raiseEvent(DNSLookup, q)

      def process_q (entry):
        # Record A and CNAME answer/additional records.
        if entry.qclass != 1:
          # Not internet
          return
        if entry.qtype == pkt.dns.rr.CNAME_TYPE:
          if self._record_cname(entry.name, entry.rddata):
            self.raiseEvent(DNSUpdate, entry.name)
            log.info("add cname entry: %s %s" % (entry.rddata, entry.name))
        elif entry.qtype == pkt.dns.rr.A_TYPE:
          if self._record(entry.rddata, entry.name):
            self.raiseEvent(DNSUpdate, entry.name)
            log.info("add dns entry: %s %s" % (entry.rddata, entry.name))

      for answer in p.answers:
        process_q(answer)
      for addition in p.additional:
        process_q(addition)
def launch (no_flow = False):
  # Instantiate and register the component with POX's core; install the
  # DNS-capture flow unless --no-flow was given on the command line.
  core.registerNew(DNSSpy, not no_flow)
| apache-2.0 |
vadimtk/chrome4sdp | third_party/mojo/src/mojo/public/third_party/jinja2/sandbox.py | 637 | 13445 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, function_type, method_type, \
traceback_type, code_type, frame_type, generator_type, PY2
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
                                  'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
# On versions > python 2 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
if not PY2:
    UNSAFE_FUNCTION_ATTRIBUTES = set()
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
    module='jinja2.sandbox')
from collections import deque

# Builtin mutable container types, extended below with their "user" and
# abstract-base-class counterparts where available.
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)

# on python 2.x we can register the user collection types
try:
    from UserDict import UserDict, DictMixin
    from UserList import UserList
    _mutable_mapping_types += (UserDict, DictMixin)
    # UserList is a sequence, not a set; registering it with the set types
    # (as before) made modifies_known_mutable() miss its mutating sequence
    # methods (append/insert/sort/...) on Python 2.
    _mutable_sequence_types += (UserList,)
except ImportError:
    pass

# if sets is still available, register the mutable set from there as well
try:
    from sets import Set
    _mutable_set_types += (Set,)
except ImportError:
    pass

#: register Python 2.6 abstract base classes
try:
    from collections import MutableSet, MutableMapping, MutableSequence
    _mutable_set_types += (MutableSet,)
    _mutable_mapping_types += (MutableMapping,)
    _mutable_sequence_types += (MutableSequence,)
except ImportError:
    pass

# (type-tuple, frozenset-of-mutating-method-names) pairs consulted by
# modifies_known_mutable().
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)
def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.
    """
    result = range(*args)
    if len(result) <= MAX_RANGE:
        return result
    # Refuse oversized ranges so templates cannot exhaust memory.
    raise OverflowError('range too big, maximum size for range is %d' %
                        MAX_RANGE)
def unsafe(f):
    """Marks a function or method as unsafe.

    ::

        @unsafe
        def delete(self):
            pass
    """
    # The sandbox's is_safe_callable() checks for this marker attribute.
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # Double-underscore attributes are always treated as internal.
    dunder = attr.startswith('__')
    if isinstance(obj, function_type):
        return dunder or attr in UNSAFE_FUNCTION_ATTRIBUTES
    if isinstance(obj, method_type):
        # Methods expose the unsafe function attributes as well.
        return (dunder or attr in UNSAFE_FUNCTION_ATTRIBUTES
                or attr in UNSAFE_METHOD_ATTRIBUTES)
    if isinstance(obj, type):
        return dunder or attr == 'mro'
    if isinstance(obj, (code_type, traceback_type, frame_type)):
        # Every attribute of code/traceback/frame objects is internal.
        return True
    if isinstance(obj, generator_type):
        return dunder or attr in UNSAFE_GENERATOR_ATTRIBUTES
    return dunder
def modifies_known_mutable(obj, attr):
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) would modify it if called.  It also supports
    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
    with Python 2.6 onwards the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object (such as unicode) `False` is
    returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # The first matching type family decides; unknown types are harmless.
    return next(
        (attr in unsafe_methods
         for typespec, unsafe_methods in _mutable_spec
         if isinstance(obj, typespec)),
        False,
    )
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """
    # Flag checked by the compiler to emit sandbox-aware code.
    sandboxed = True
    #: default callback table for the binary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '//': operator.floordiv,
        '**': operator.pow,
        '%': operator.mod
    }
    #: default callback table for the unary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {
        '+': operator.pos,
        '-': operator.neg
    }
    #: a set of binary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops = frozenset()
    #: a set of unary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops = frozenset()
    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime.  If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator.  The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        return False
    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # Expose a bounded range so templates cannot create huge sequences.
        self.globals['range'] = safe_range
        # Per-instance copies so subclasses/instances may customize tables.
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()
    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))
    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))
    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)
    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)
    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Fall back to attribute access for string subscripts, mirroring
            # Jinja's item/attribute lookup order.
            if isinstance(argument, string_types):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)
    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Fall back to item lookup when the attribute does not exist.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)
    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)
    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """
    def is_safe_attribute(self, obj, attr, value):
        # Safe only if the base sandbox allows the attribute *and* it does
        # not mutate a known mutable builtin container.
        base_safe = SandboxedEnvironment.is_safe_attribute(self, obj, attr,
                                                           value)
        return base_safe and not modifies_known_mutable(obj, attr)
| bsd-3-clause |
antepsis/anteplahmacun | sympy/diffgeom/tests/test_function_diffgeom_book.py | 102 | 5258 | from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r
from sympy.diffgeom import intcurve_series, Differential, WedgeProduct
from sympy.core import symbols, Function, Derivative
from sympy.simplify import trigsimp, simplify
from sympy.functions import sqrt, atan2, sin, cos
from sympy.matrices import Matrix
# Most of the functionality is covered in the
# test_functional_diffgeom_ch* tests which are based on the
# example from the paper of Sussman and Wisdom.
# If they do not cover something, additional tests are added in other test
# functions.
# From "Functional Differential Geometry" as of 2011
# by Sussman and Wisdom.
def test_functional_diffgeom_ch2():
    # Chapter 2: scalar fields and coordinate transforms between the
    # rectangular (R2_r) and polar (R2_p) patches.
    x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True)
    x, y = symbols('x, y', real=True)
    f = Function('f')
    assert (R2_p.point_to_coords(R2_r.point([x0, y0])) ==
            Matrix([sqrt(x0**2 + y0**2), atan2(y0, x0)]))
    assert (R2_r.point_to_coords(R2_p.point([r0, theta0])) ==
            Matrix([r0*cos(theta0), r0*sin(theta0)]))
    assert R2_p.jacobian(R2_r, [r0, theta0]) == Matrix(
        [[cos(theta0), -r0*sin(theta0)], [sin(theta0), r0*cos(theta0)]])
    # A scalar field evaluates identically however the point is expressed.
    field = f(R2.x, R2.y)
    p1_in_rect = R2_r.point([x0, y0])
    p1_in_polar = R2_p.point([sqrt(x0**2 + y0**2), atan2(y0, x0)])
    assert field.rcall(p1_in_rect) == f(x0, y0)
    assert field.rcall(p1_in_polar) == f(x0, y0)
    p_r = R2_r.point([x0, y0])
    p_p = R2_p.point([r0, theta0])
    # Coordinate functions evaluated at points of either patch.
    assert R2.x(p_r) == x0
    assert R2.x(p_p) == r0*cos(theta0)
    assert R2.r(p_p) == r0
    assert R2.r(p_r) == sqrt(x0**2 + y0**2)
    assert R2.theta(p_r) == atan2(y0, x0)
    h = R2.x*R2.r**2 + R2.y**3
    assert h.rcall(p_r) == x0*(x0**2 + y0**2) + y0**3
    assert h.rcall(p_p) == r0**3*sin(theta0)**3 + r0**3*cos(theta0)
def test_functional_diffgeom_ch3():
    # Chapter 3: vector fields acting on scalar fields, and integral-curve
    # series expansions.
    x0, y0 = symbols('x0, y0', real=True)
    x, y, t = symbols('x, y, t', real=True)
    f = Function('f')
    b1 = Function('b1')
    b2 = Function('b2')
    p_r = R2_r.point([x0, y0])
    s_field = f(R2.x, R2.y)
    v_field = b1(R2.x)*R2.e_x + b2(R2.y)*R2.e_y
    assert v_field.rcall(s_field).rcall(p_r).doit() == b1(
        x0)*Derivative(f(x0, y0), x0) + b2(y0)*Derivative(f(x0, y0), y0)
    assert R2.e_x(R2.r**2).rcall(p_r) == 2*x0
    v = R2.e_x + 2*R2.e_y
    s = R2.r**2 + 3*R2.x
    assert v.rcall(s).rcall(p_r).doit() == 2*x0 + 4*y0 + 3
    # The integral curve of the circular field starting at (1, 0) is
    # (cos t, sin t); compare the series coefficients term by term.
    circ = -R2.y*R2.e_x + R2.x*R2.e_y
    series = intcurve_series(circ, t, R2_r.point([1, 0]), coeffs=True)
    series_x, series_y = zip(*series)
    # Pass generator expressions to all() directly; wrapping them in a
    # list (as before) builds a throwaway list for no benefit.
    assert all(term == cos(t).taylor_term(i, t)
               for i, term in enumerate(series_x))
    assert all(term == sin(t).taylor_term(i, t)
               for i, term in enumerate(series_y))
def test_functional_diffgeom_ch4():
    # Chapter 4: one-forms, differentials of scalar fields, and their
    # pairing with coordinate basis vector fields.
    x0, y0, theta0 = symbols('x0, y0, theta0', real=True)
    x, y, r, theta = symbols('x, y, r, theta', real=True)
    r0 = symbols('r0', positive=True)
    f = Function('f')
    b1 = Function('b1')
    b2 = Function('b2')
    p_r = R2_r.point([x0, y0])
    p_p = R2_p.point([r0, theta0])
    f_field = b1(R2.x, R2.y)*R2.dx + b2(R2.x, R2.y)*R2.dy
    assert f_field.rcall(R2.e_x).rcall(p_r) == b1(x0, y0)
    assert f_field.rcall(R2.e_y).rcall(p_r) == b2(x0, y0)
    s_field_r = f(R2.x, R2.y)
    df = Differential(s_field_r)
    assert df(R2.e_x).rcall(p_r).doit() == Derivative(f(x0, y0), x0)
    assert df(R2.e_y).rcall(p_r).doit() == Derivative(f(x0, y0), y0)
    # The same differential expressed against polar basis vectors.
    s_field_p = f(R2.r, R2.theta)
    df = Differential(s_field_p)
    assert trigsimp(df(R2.e_x).rcall(p_p).doit()) == (
        cos(theta0)*Derivative(f(r0, theta0), r0) -
        sin(theta0)*Derivative(f(r0, theta0), theta0)/r0)
    assert trigsimp(df(R2.e_y).rcall(p_p).doit()) == (
        sin(theta0)*Derivative(f(r0, theta0), r0) +
        cos(theta0)*Derivative(f(r0, theta0), theta0)/r0)
    # dx pairs with the basis fields as the Kronecker delta.
    assert R2.dx(R2.e_x).rcall(p_r) == 1
    assert R2.dx(R2.e_x) == 1
    assert R2.dx(R2.e_y).rcall(p_r) == 0
    assert R2.dx(R2.e_y) == 0
    circ = -R2.y*R2.e_x + R2.x*R2.e_y
    assert R2.dx(circ).rcall(p_r).doit() == -y0
    assert R2.dy(circ).rcall(p_r) == x0
    assert R2.dr(circ).rcall(p_r) == 0
    assert simplify(R2.dtheta(circ).rcall(p_r)) == 1
    assert (circ - R2.e_theta).rcall(s_field_r).rcall(p_r) == 0
def test_functional_diffgeom_ch6():
    # Chapter 6: wedge products and the exterior derivative of a one-form.
    u0, u1, u2, v0, v1, v2, w0, w1, w2 = symbols('u0:3, v0:3, w0:3', real=True)
    u = u0*R2.e_x + u1*R2.e_y
    v = v0*R2.e_x + v1*R2.e_y
    wp = WedgeProduct(R2.dx, R2.dy)
    # In 2D the wedge of the coordinate one-forms gives the determinant.
    assert wp(u, v) == u0*v1 - u1*v0
    u = u0*R3_r.e_x + u1*R3_r.e_y + u2*R3_r.e_z
    v = v0*R3_r.e_x + v1*R3_r.e_y + v2*R3_r.e_z
    w = w0*R3_r.e_x + w1*R3_r.e_y + w2*R3_r.e_z
    wp = WedgeProduct(R3_r.dx, R3_r.dy, R3_r.dz)
    assert wp(
        u, v, w) == Matrix(3, 3, [u0, u1, u2, v0, v1, v2, w0, w1, w2]).det()
    a, b, c = symbols('a, b, c', cls=Function)
    a_f = a(R3_r.x, R3_r.y, R3_r.z)
    b_f = b(R3_r.x, R3_r.y, R3_r.z)
    c_f = c(R3_r.x, R3_r.y, R3_r.z)
    theta = a_f*R3_r.dx + b_f*R3_r.dy + c_f*R3_r.dz
    # d(theta) should equal da^dx + db^dy + dc^dz; the difference must
    # vanish on any pair of basis fields.
    dtheta = Differential(theta)
    da = Differential(a_f)
    db = Differential(b_f)
    dc = Differential(c_f)
    expr = dtheta - WedgeProduct(
        da, R3_r.dx) - WedgeProduct(db, R3_r.dy) - WedgeProduct(dc, R3_r.dz)
    assert expr.rcall(R3_r.e_x, R3_r.e_y) == 0
| bsd-3-clause |
danalec/dotfiles | sublime/.config/sublime-text-3/Packages/anaconda_go/plugin/handlers_go/commands/goto.py | 2 | 2949 |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from ..anagonda.context import guru, godef
from ..anagonda.context.error import AnaGondaError
from commands.base import Command
class Goto(Command):
    """Resolve the definition under the cursor with guru and/or godef.

    The `guru_usage` setting selects the strategy: 'always' uses guru only,
    'fallback' tries godef first and falls back to guru.
    """

    def __init__(self, callback, uid, vid, code, filename, settings, go_env):
        self.vid = vid
        self.code = code
        self.path = filename
        self.settings = settings
        self.go_env = go_env
        super(Goto, self).__init__(callback, uid)

    def run(self):
        """Run the command and report success or failure via the callback.
        """
        try:
            self.callback({
                'success': True,
                'result': self.goto(),
                'uid': self.uid,
                'vid': self.vid
            })
        except Exception as error:
            # Top-level boundary: log and report the error to the editor.
            logging.error(error)
            logging.debug(traceback.format_exc())
            self.callback({
                'success': False,
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })

    def goto(self):
        """Run GoDef, GuRu or both and return back a ready to use result
        """
        guru_usage = self.settings.get('guru_usage', 'always')
        if guru_usage == 'always':
            return self._normalize(self.guru())

        defs = []
        try:
            defs = self._normalize(self.godef())
        except AnaGondaError as err:
            logging.error('GoDef failed with error: {0}'.format(err))
            if guru_usage == 'fallback':
                defs = self._normalize(self.guru())
        else:
            # godef succeeded but found nothing; optionally retry with guru.
            if len(defs) == 0 and guru_usage == 'fallback':
                defs = self._normalize(self.guru())
        return defs

    def guru(self):
        """Use Guru context and return back the result
        """
        with guru.Guru(
                None, 'definition', self.path,
                self.settings.get('offset', 0),
                self.settings.get('modified_buffer'), self.go_env) as defs:
            return defs

    def godef(self):
        """Use GoDef context and return back the result
        """
        with godef.GoDef(
                self.code, self.path,
                self.settings.get('expr', ''), False, self.go_env) as defs:
            return defs

    def _normalize(self, defs):
        """Normalize tools output into anaconda's goto format
        """
        if defs['tool'] == 'guru':
            # `objpos` is expected to look like "file:line:col"; a missing
            # or malformed field yields an empty result instead of crashing.
            try:
                return [{
                    'title': defs['objpos'].split(':')[0],
                    'position': defs['objpos']
                }]
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt.
                return []
        return [{
            'title': defs['filename'],
            'position': '{}:{}:{}'.format(
                defs['filename'], defs['line'], defs['column']
            )
        }]
| mit |
chauhanhardik/populo | lms/djangoapps/staticbook/views.py | 91 | 6351 | """
Views for serving static textbooks.
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.annotator_token import retrieve_token
from courseware.access import has_access
from courseware.courses import get_course_with_access
from notes.utils import notes_enabled_for_course
from static_replace import replace_static_urls
@login_required
def index(request, course_id, book_index, page=None):
    """
    Serve static image-based textbooks.

    course_id: course whose "textbooks" list is consulted.
    book_index: zero-based index into course.textbooks.
    page: (optional) one-based page number; defaults to the book's
        configured start page.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))
    book_index = int(book_index)
    # Out-of-range indices become a 404 rather than an IndexError.
    if book_index < 0 or book_index >= len(course.textbooks):
        raise Http404("Invalid book index value: {0}".format(book_index))
    textbook = course.textbooks[book_index]
    table_of_contents = textbook.table_of_contents
    if page is None:
        page = textbook.start_page
    return render_to_response(
        'staticbook.html',
        {
            'book_index': book_index, 'page': int(page),
            'course': course,
            'book_url': textbook.book_url,
            'table_of_contents': table_of_contents,
            'start_page': textbook.start_page,
            'end_page': textbook.end_page,
            'staff_access': staff_access,
        },
    )
def remap_static_url(original_url, course):
    """Remap a URL in the ways the course requires."""
    # replace_static_urls operates on quoted URLs embedded in source text,
    # so wrap the bare URL in quotes, transform it, then strip them again.
    quoted_url = "'" + original_url + "'"
    remapped = replace_static_urls(
        quoted_url,
        getattr(course, 'data_dir', None),
        course_id=course.id,
        static_asset_path=course.static_asset_path
    )
    # strip off the quotes again...
    return remapped[1:-1]
@login_required
def pdf_index(request, course_id, book_index, chapter=None, page=None):
    """
    Display a PDF textbook.

    course_id: course for which to display text. The course should have
        "pdf_textbooks" property defined.

    book index: zero-based index of which PDF textbook to display.

    chapter: (optional) one-based index into the chapter array of textbook
        PDFs to display. Defaults to first chapter. Specifying this assumes
        that there are separate PDFs for each chapter in a textbook.

    page: (optional) one-based page number to display within the PDF.
        Defaults to first page.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))

    book_index = int(book_index)
    if book_index < 0 or book_index >= len(course.pdf_textbooks):
        raise Http404("Invalid book index value: {0}".format(book_index))
    textbook = course.pdf_textbooks[book_index]

    viewer_params = '&file='
    current_url = ''
    if 'url' in textbook:
        textbook['url'] = remap_static_url(textbook['url'], course)
        viewer_params += textbook['url']
        current_url = textbook['url']

    # then remap all the chapter URLs as well, if they are provided.
    current_chapter = None
    if 'chapters' in textbook:
        for entry in textbook['chapters']:
            entry['url'] = remap_static_url(entry['url'], course)
        if chapter is not None:
            # Validate the one-based chapter index so an out-of-range value
            # yields a 404 instead of an IndexError (HTTP 500).
            chapter_index = int(chapter) - 1
            if not 0 <= chapter_index < len(textbook['chapters']):
                raise Http404("Invalid chapter index value: {0}".format(chapter))
            current_chapter = textbook['chapters'][chapter_index]
        else:
            current_chapter = textbook['chapters'][0]
        viewer_params += current_chapter['url']
        current_url = current_chapter['url']

    viewer_params += '#zoom=page-fit&disableRange=true'
    if page is not None:
        viewer_params += '&page={}'.format(page)

    if request.GET.get('viewer', '') == 'true':
        template = 'pdf_viewer.html'
    else:
        template = 'static_pdfbook.html'

    return render_to_response(
        template,
        {
            'book_index': book_index,
            'course': course,
            'textbook': textbook,
            'chapter': chapter,
            'page': page,
            'viewer_params': viewer_params,
            'current_chapter': current_chapter,
            'staff_access': staff_access,
            'current_url': current_url,
        },
    )
@login_required
def html_index(request, course_id, book_index, chapter=None):
    """
    Display an HTML textbook.

    course_id: course for which to display text.  The course should have
      "html_textbooks" property defined.
    book index:  zero-based index of which HTML textbook to display.
    chapter:  (optional) one-based index into the chapter array of textbook HTML files to display.
        Defaults to first chapter.  Specifying this assumes that there are separate HTML files for
        each chapter in a textbook.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))
    notes_enabled = notes_enabled_for_course(course)
    book_index = int(book_index)
    if not (0 <= book_index < len(course.html_textbooks)):
        raise Http404("Invalid book index value: {0}".format(book_index))
    textbook = course.html_textbooks[book_index]
    # Remap the textbook's top-level URL, and every chapter URL when
    # chapters are provided, so static references resolve for this course.
    if 'url' in textbook:
        textbook['url'] = remap_static_url(textbook['url'], course)
    for chapter_entry in textbook.get('chapters', []):
        chapter_entry['url'] = remap_static_url(chapter_entry['url'], course)
    student = request.user
    context = {
        'book_index': book_index,
        'course': course,
        'textbook': textbook,
        'chapter': chapter,
        'student': student,
        'staff_access': staff_access,
        'notes_enabled': notes_enabled,
        'storage': course.annotation_storage_url,
        'token': retrieve_token(student.email, course.annotation_token_secret),
    }
    return render_to_response('static_htmlbook.html', context)
| agpl-3.0 |
clee704/NaverWebtoonFeeds | docs/conf.py | 1 | 8308 | # -*- coding: utf-8 -*-
#
# NaverWebtoonFeeds documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 31 11:29:44 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# PEP 8: one import per line, stdlib imports first.
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from naverwebtoonfeeds import __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
]
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
'requests': ('http://docs.python-requests.org/en/v1.1.0', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NaverWebtoonFeeds'
copyright = u'2014, Choongmin Lee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# READTHEDOCS is set in the Read the Docs build environment; there, the
# platform presumably supplies its own theme, so sphinx_rtd_theme is only
# configured for local builds (NOTE(review): confirm RTD still injects it).
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NaverWebtoonFeedsDoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'NaverWebtoonFeeds.tex', u'Naver Webtoon Feeds Documentation',
u'Choongmin Lee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'naverwebtoonfeeds', u'Naver Webtoon Feeds Documentation',
[u'Choongmin Lee'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NaverWebtoonFeeds', u'Naver Webtoon Feeds Documentation',
u'Choongmin Lee', 'NaverWebtoonFeeds', 'Feeds for Naver webtoons',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| agpl-3.0 |
arruda/ANTLR3-python | unittests/testtree.py | 1 | 41340 | # -*- coding: utf-8 -*-
import os
import unittest
from StringIO import StringIO
from antlr3.tree import (CommonTreeNodeStream, CommonTree, CommonTreeAdaptor,
TreeParser, TreeVisitor)
from antlr3 import CommonToken, UP, DOWN, EOF
from antlr3.treewizard import TreeWizard
class TestTreeNodeStream(unittest.TestCase):
    """Test case for the TreeNodeStream class."""
    def setUp(self):
        # One adaptor per test; used to build nil roots and query node types.
        self.adaptor = CommonTreeAdaptor()
    def newStream(self, t):
        """Build new stream; let's us override to test other streams."""
        return CommonTreeNodeStream(t)
    def testSingleNode(self):
        """A single node serializes to its token type, with no nav nodes."""
        t = CommonTree(CommonToken(101))
        stream = self.newStream(t)
        expecting = "101"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)
        expecting = "101"
        found = str(stream)
        self.failUnlessEqual(expecting, found)
    def testTwoChildrenOfNilRoot(self):
        """Children added under a nil root get sequential child indexes."""
        # Minimal CommonTree subclass to verify custom node types survive
        # adaptor.addChild bookkeeping.
        class V(CommonTree):
            def __init__(self, token=None, ttype=None, x=None):
                if x is not None:
                    self.x = x
                if ttype is not None and token is None:
                    self.token = CommonToken(type=ttype)
                if token is not None:
                    self.token = token
            def __str__(self):
                if self.token is not None:
                    txt = self.token.text
                else:
                    txt = ""
                txt += "<V>"
                return txt
        root_0 = self.adaptor.nil();
        t = V(ttype=101, x=2)
        u = V(token=CommonToken(type=102, text="102"))
        self.adaptor.addChild(root_0, t)
        self.adaptor.addChild(root_0, u)
        self.assert_(root_0.parent is None)
        self.assertEquals(-1, root_0.childIndex)
        self.assertEquals(0, t.childIndex)
        self.assertEquals(1, u.childIndex)
    def test4Nodes(self):
        """Nested tree serializes with DOWN(2)/UP(3) navigation nodes."""
        # ^(101 ^(102 103) 104)
        t = CommonTree(CommonToken(101))
        t.addChild(CommonTree(CommonToken(102)))
        t.getChild(0).addChild(CommonTree(CommonToken(103)))
        t.addChild(CommonTree(CommonToken(104)))
        stream = self.newStream(t)
        expecting = "101 102 103 104"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)
        expecting = "101 2 102 2 103 3 104 3"
        found = str(stream)
        self.failUnlessEqual(expecting, found)
    def testList(self):
        """A nil root holding two subtrees streams its children flat."""
        root = CommonTree(None)
        t = CommonTree(CommonToken(101))
        t.addChild(CommonTree(CommonToken(102)))
        t.getChild(0).addChild(CommonTree(CommonToken(103)))
        t.addChild(CommonTree(CommonToken(104)))
        u = CommonTree(CommonToken(105))
        root.addChild(t)
        root.addChild(u)
        stream = CommonTreeNodeStream(root)
        expecting = "101 102 103 104 105"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)
        expecting = "101 2 102 2 103 3 104 3 105"
        found = str(stream)
        self.failUnlessEqual(expecting, found)
    def testFlatList(self):
        """A flat list under a nil root streams with no nav nodes at all."""
        root = CommonTree(None)
        root.addChild(CommonTree(CommonToken(101)))
        root.addChild(CommonTree(CommonToken(102)))
        root.addChild(CommonTree(CommonToken(103)))
        stream = CommonTreeNodeStream(root)
        expecting = "101 102 103"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)
        expecting = "101 102 103"
        found = str(stream)
        self.failUnlessEqual(expecting, found)
    def testListWithOneNode(self):
        """A one-element list under a nil root behaves like a single node."""
        root = CommonTree(None)
        root.addChild(CommonTree(CommonToken(101)))
        stream = CommonTreeNodeStream(root)
        expecting = "101"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)
        expecting = "101"
        found = str(stream)
        self.failUnlessEqual(expecting, found)
    def testAoverB(self):
        """A single parent/child pair serializes as 'root DOWN child UP'."""
        t = CommonTree(CommonToken(101))
        t.addChild(CommonTree(CommonToken(102)))
        stream = self.newStream(t)
        expecting = "101 102"
        found = self.toNodesOnlyString(stream)
        self.failUnlessEqual(expecting, found)
        expecting = "101 2 102 3"
        found = str(stream)
        self.failUnlessEqual(expecting, found)
    def testLT(self):
        """LT(k) lookahead walks the flattened node sequence; EOF past end."""
        # ^(101 ^(102 103) 104)
        t = CommonTree(CommonToken(101))
        t.addChild(CommonTree(CommonToken(102)))
        t.getChild(0).addChild(CommonTree(CommonToken(103)))
        t.addChild(CommonTree(CommonToken(104)))
        stream = self.newStream(t)
        self.failUnlessEqual(101, stream.LT(1).getType())
        self.failUnlessEqual(DOWN, stream.LT(2).getType())
        self.failUnlessEqual(102, stream.LT(3).getType())
        self.failUnlessEqual(DOWN, stream.LT(4).getType())
        self.failUnlessEqual(103, stream.LT(5).getType())
        self.failUnlessEqual(UP, stream.LT(6).getType())
        self.failUnlessEqual(104, stream.LT(7).getType())
        self.failUnlessEqual(UP, stream.LT(8).getType())
        self.failUnlessEqual(EOF, stream.LT(9).getType())
        # check way ahead
        self.failUnlessEqual(EOF, stream.LT(100).getType())
    def testMarkRewindEntire(self):
        """mark() at start + rewind() replays the entire stream."""
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))
        stream = CommonTreeNodeStream(r0)
        m = stream.mark() # MARK
        for _ in range(13): # consume til end
            stream.LT(1)
            stream.consume()
        self.failUnlessEqual(EOF, stream.LT(1).getType())
        self.failUnlessEqual(UP, stream.LT(-1).getType())
        stream.rewind(m)      # REWIND
        # consume til end again :)
        for _ in range(13): # consume til end
            stream.LT(1)
            stream.consume()
        self.failUnlessEqual(EOF, stream.LT(1).getType())
        self.failUnlessEqual(UP, stream.LT(-1).getType())
    def testMarkRewindInMiddle(self):
        """rewind() to a mid-stream mark resumes, then runs to EOF."""
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))
        stream = CommonTreeNodeStream(r0)
        for _ in range(7): # consume til middle
            #System.out.println(tream.LT(1).getType())
            stream.consume()
        self.failUnlessEqual(107, stream.LT(1).getType())
        m = stream.mark() # MARK
        stream.consume() # consume 107
        stream.consume() # consume UP
        stream.consume() # consume UP
        stream.consume() # consume 104
        stream.rewind(m) # REWIND
        self.failUnlessEqual(107, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(UP, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(UP, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(104, stream.LT(1).getType())
        stream.consume()
        # now we're past rewind position
        self.failUnlessEqual(105, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(UP, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(EOF, stream.LT(1).getType())
        self.failUnlessEqual(UP, stream.LT(-1).getType())
    def testMarkRewindNested(self):
        """Nested marks rewind independently, inner first, then outer."""
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))
        stream = CommonTreeNodeStream(r0)
        m = stream.mark() # MARK at start
        stream.consume() # consume 101
        stream.consume() # consume DN
        m2 = stream.mark() # MARK on 102
        stream.consume() # consume 102
        stream.consume() # consume DN
        stream.consume() # consume 103
        stream.consume() # consume 106
        stream.rewind(m2)      # REWIND to 102
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume()
        # stop at 103 and rewind to start
        stream.rewind(m) # REWIND to 101
        self.failUnlessEqual(101, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume()
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
    def testSeek(self):
        """seek(i) jumps forward to absolute index i mid-iteration."""
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))
        stream = CommonTreeNodeStream(r0)
        stream.consume() # consume 101
        stream.consume() # consume DN
        stream.consume() # consume 102
        stream.seek(7)   # seek to 107
        self.failUnlessEqual(107, stream.LT(1).getType())
        stream.consume() # consume 107
        stream.consume() # consume UP
        stream.consume() # consume UP
        self.failUnlessEqual(104, stream.LT(1).getType())
    def testSeekFromStart(self):
        """seek(i) works before any consume() call."""
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        # stream has 7 real + 6 nav nodes
        # Sequence of types: 101 DN 102 DN 103 106 DN 107 UP UP 104 105 UP EOF
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))
        stream = CommonTreeNodeStream(r0)
        stream.seek(7)   # seek to 107
        self.failUnlessEqual(107, stream.LT(1).getType())
        stream.consume() # consume 107
        stream.consume() # consume UP
        stream.consume() # consume UP
        self.failUnlessEqual(104, stream.LT(1).getType())
    def toNodesOnlyString(self, nodes):
        """Render the stream's node types, skipping DOWN/UP nav nodes."""
        buf = []
        for i in range(nodes.size()):
            t = nodes.LT(i+1)
            type = nodes.getTreeAdaptor().getType(t)
            if not (type==DOWN or type==UP):
                buf.append(str(type))
        return ' '.join(buf)
class TestCommonTreeNodeStream(unittest.TestCase):
    """Test case for the CommonTreeNodeStream class."""
    def testPushPop(self):
        """push(i) jumps to index i; pop() returns to the saved position."""
        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
        # stream has 9 real + 8 nav nodes
        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r1.addChild(CommonTree(CommonToken(103)))
        r0.addChild(r1)
        r2 = CommonTree(CommonToken(104))
        r2.addChild(CommonTree(CommonToken(105)))
        r0.addChild(r2)
        r3 = CommonTree(CommonToken(106))
        r3.addChild(CommonTree(CommonToken(107)))
        r0.addChild(r3)
        r0.addChild(CommonTree(CommonToken(108)))
        r0.addChild(CommonTree(CommonToken(109)))
        stream = CommonTreeNodeStream(r0)
        expecting = "101 2 102 2 103 3 104 2 105 3 106 2 107 3 108 109 3"
        found = str(stream)
        self.failUnlessEqual(expecting, found)
        # Assume we want to hit node 107 and then "call 102" then return
        indexOf102 = 2
        indexOf107 = 12
        for _ in range(indexOf107):# consume til 107 node
            stream.consume()
        # CALL 102
        self.failUnlessEqual(107, stream.LT(1).getType())
        stream.push(indexOf102)
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume() # consume 102
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(103, stream.LT(1).getType())
        stream.consume() # consume 103
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN
        stream.pop()
        self.failUnlessEqual(107, stream.LT(1).getType())
    def testNestedPushPop(self):
        """Nested push/push/pop/pop unwinds like a call stack."""
        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
        # stream has 9 real + 8 nav nodes
        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r1.addChild(CommonTree(CommonToken(103)))
        r0.addChild(r1)
        r2 = CommonTree(CommonToken(104))
        r2.addChild(CommonTree(CommonToken(105)))
        r0.addChild(r2)
        r3 = CommonTree(CommonToken(106))
        r3.addChild(CommonTree(CommonToken(107)))
        r0.addChild(r3)
        r0.addChild(CommonTree(CommonToken(108)))
        r0.addChild(CommonTree(CommonToken(109)))
        stream = CommonTreeNodeStream(r0)
        # Assume we want to hit node 107 and then "call 102", which
        # calls 104, then return
        indexOf102 = 2
        indexOf107 = 12
        for _ in range(indexOf107): # consume til 107 node
            stream.consume()
        self.failUnlessEqual(107, stream.LT(1).getType())
        # CALL 102
        stream.push(indexOf102)
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume() # consume 102
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(103, stream.LT(1).getType())
        stream.consume() # consume 103
        # CALL 104
        indexOf104 = 6
        stream.push(indexOf104)
        self.failUnlessEqual(104, stream.LT(1).getType())
        stream.consume() # consume 102
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(105, stream.LT(1).getType())
        stream.consume() # consume 103
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN (to UP node in 102 subtree)
        stream.pop()
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN (to empty stack)
        stream.pop()
        self.failUnlessEqual(107, stream.LT(1).getType())
    def testPushPopFromEOF(self):
        """push/pop still works after the stream has been consumed to EOF."""
        # ^(101 ^(102 103) ^(104 105) ^(106 107) 108 109)
        # stream has 9 real + 8 nav nodes
        # Sequence of types: 101 DN 102 DN 103 UP 104 DN 105 UP 106 DN 107 UP 108 109 UP
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r1.addChild(CommonTree(CommonToken(103)))
        r0.addChild(r1)
        r2 = CommonTree(CommonToken(104))
        r2.addChild(CommonTree(CommonToken(105)))
        r0.addChild(r2)
        r3 = CommonTree(CommonToken(106))
        r3.addChild(CommonTree(CommonToken(107)))
        r0.addChild(r3)
        r0.addChild(CommonTree(CommonToken(108)))
        r0.addChild(CommonTree(CommonToken(109)))
        stream = CommonTreeNodeStream(r0)
        while stream.LA(1) != EOF:
            stream.consume()
        indexOf102 = 2
        indexOf104 = 6
        self.failUnlessEqual(EOF, stream.LT(1).getType())
        # CALL 102
        stream.push(indexOf102)
        self.failUnlessEqual(102, stream.LT(1).getType())
        stream.consume() # consume 102
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(103, stream.LT(1).getType())
        stream.consume() # consume 103
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN (to empty stack)
        stream.pop()
        self.failUnlessEqual(EOF, stream.LT(1).getType())
        # CALL 104
        stream.push(indexOf104)
        self.failUnlessEqual(104, stream.LT(1).getType())
        stream.consume() # consume 102
        self.failUnlessEqual(DOWN, stream.LT(1).getType())
        stream.consume() # consume DN
        self.failUnlessEqual(105, stream.LT(1).getType())
        stream.consume() # consume 103
        self.failUnlessEqual(UP, stream.LT(1).getType())
        # RETURN (to empty stack)
        stream.pop()
        self.failUnlessEqual(EOF, stream.LT(1).getType())
class TestCommonTree(unittest.TestCase):
"""Test case for the CommonTree class."""
    def setUp(self):
        """Set up test fixture: one adaptor shared by the tests below."""
        self.adaptor = CommonTreeAdaptor()
    def testSingleNode(self):
        """A lone node has no parent and child index -1."""
        t = CommonTree(CommonToken(101))
        self.failUnless(t.parent is None)
        self.failUnlessEqual(-1, t.childIndex)
    def test4Nodes(self):
        """The root of a nested tree still has no parent / index -1."""
        # ^(101 ^(102 103) 104)
        r0 = CommonTree(CommonToken(101))
        r0.addChild(CommonTree(CommonToken(102)))
        r0.getChild(0).addChild(CommonTree(CommonToken(103)))
        r0.addChild(CommonTree(CommonToken(104)))
        self.failUnless(r0.parent is None)
        self.failUnlessEqual(-1, r0.childIndex)
    def testList(self):
        """addChild sets parent and sequential childIndex on each child."""
        # ^(nil 101 102 103)
        r0 = CommonTree(None)
        c0=CommonTree(CommonToken(101))
        r0.addChild(c0)
        c1=CommonTree(CommonToken(102))
        r0.addChild(c1)
        c2=CommonTree(CommonToken(103))
        r0.addChild(c2)
        self.failUnless(r0.parent is None)
        self.failUnlessEqual(-1, r0.childIndex)
        self.failUnlessEqual(r0, c0.parent)
        self.failUnlessEqual(0, c0.childIndex)
        self.failUnlessEqual(r0, c1.parent)
        self.failUnlessEqual(1, c1.childIndex)
        self.failUnlessEqual(r0, c2.parent)
        self.failUnlessEqual(2, c2.childIndex)
def testList2(self):
# Add child ^(nil 101 102 103) to root 5
# should pull 101 102 103 directly to become 5's child list
root = CommonTree(CommonToken(5))
# child tree
r0 = CommonTree(None)
c0=CommonTree(CommonToken(101))
r0.addChild(c0)
c1=CommonTree(CommonToken(102))
r0.addChild(c1)
c2=CommonTree(CommonToken(103))
r0.addChild(c2)
root.addChild(r0)
self.failUnless(root.parent is None)
self.failUnlessEqual(-1, root.childIndex)
# check children of root all point at root
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(0, c0.childIndex)
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(1, c1.childIndex)
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(2, c2.childIndex)
def testAddListToExistChildren(self):
# Add child ^(nil 101 102 103) to root ^(5 6)
# should add 101 102 103 to end of 5's child list
root = CommonTree(CommonToken(5))
root.addChild(CommonTree(CommonToken(6)))
# child tree
r0 = CommonTree(None)
c0=CommonTree(CommonToken(101))
r0.addChild(c0)
c1=CommonTree(CommonToken(102))
r0.addChild(c1)
c2=CommonTree(CommonToken(103))
r0.addChild(c2)
root.addChild(r0)
self.failUnless(root.parent is None)
self.failUnlessEqual(-1, root.childIndex)
# check children of root all point at root
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(1, c0.childIndex)
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(2, c1.childIndex)
self.failUnlessEqual(root, c0.parent)
self.failUnlessEqual(3, c2.childIndex)
    def testDupTree(self):
        """dupTree yields a detached copy with consistent parent/indexes."""
        # ^(101 ^(102 103 ^(106 107) ) 104 105)
        r0 = CommonTree(CommonToken(101))
        r1 = CommonTree(CommonToken(102))
        r0.addChild(r1)
        r1.addChild(CommonTree(CommonToken(103)))
        r2 = CommonTree(CommonToken(106))
        r2.addChild(CommonTree(CommonToken(107)))
        r1.addChild(r2)
        r0.addChild(CommonTree(CommonToken(104)))
        r0.addChild(CommonTree(CommonToken(105)))
        dup = self.adaptor.dupTree(r0)
        self.failUnless(dup.parent is None)
        self.failUnlessEqual(-1, dup.childIndex)
        dup.sanityCheckParentAndChildIndexes()
    def testBecomeRoot(self):
        """becomeRoot over a nil old root keeps bookkeeping consistent."""
        # 5 becomes root of ^(nil 101 102 103)
        newRoot = CommonTree(CommonToken(5))
        oldRoot = CommonTree(None)
        oldRoot.addChild(CommonTree(CommonToken(101)))
        oldRoot.addChild(CommonTree(CommonToken(102)))
        oldRoot.addChild(CommonTree(CommonToken(103)))
        self.adaptor.becomeRoot(newRoot, oldRoot)
        newRoot.sanityCheckParentAndChildIndexes()
    def testBecomeRoot2(self):
        """becomeRoot over a non-nil old root keeps bookkeeping consistent."""
        # 5 becomes root of ^(101 102 103)
        newRoot = CommonTree(CommonToken(5))
        oldRoot = CommonTree(CommonToken(101))
        oldRoot.addChild(CommonTree(CommonToken(102)))
        oldRoot.addChild(CommonTree(CommonToken(103)))
        self.adaptor.becomeRoot(newRoot, oldRoot)
        newRoot.sanityCheckParentAndChildIndexes()
    def testBecomeRoot3(self):
        """A nil new root with one child can become root of a nil list."""
        # ^(nil 5) becomes root of ^(nil 101 102 103)
        newRoot = CommonTree(None)
        newRoot.addChild(CommonTree(CommonToken(5)))
        oldRoot = CommonTree(None)
        oldRoot.addChild(CommonTree(CommonToken(101)))
        oldRoot.addChild(CommonTree(CommonToken(102)))
        oldRoot.addChild(CommonTree(CommonToken(103)))
        self.adaptor.becomeRoot(newRoot, oldRoot)
        newRoot.sanityCheckParentAndChildIndexes()
    def testBecomeRoot5(self):
        """A nil new root with one child can become root of a real tree."""
        # ^(nil 5) becomes root of ^(101 102 103)
        newRoot = CommonTree(None)
        newRoot.addChild(CommonTree(CommonToken(5)))
        oldRoot = CommonTree(CommonToken(101))
        oldRoot.addChild(CommonTree(CommonToken(102)))
        oldRoot.addChild(CommonTree(CommonToken(103)))
        self.adaptor.becomeRoot(newRoot, oldRoot)
        newRoot.sanityCheckParentAndChildIndexes()
    def testBecomeRoot6(self):
        """Emulates generated-parser construction of ^(5 6)."""
        # emulates construction of ^(5 6)
        root_0 = self.adaptor.nil()
        root_1 = self.adaptor.nil()
        root_1 = self.adaptor.becomeRoot(CommonTree(CommonToken(5)), root_1)
        self.adaptor.addChild(root_1, CommonTree(CommonToken(6)))
        self.adaptor.addChild(root_0, root_1)
        root_0.sanityCheckParentAndChildIndexes()
    # Test replaceChildren
    def testReplaceWithNoChildren(self):
        """replaceChildren on a childless node raises IndexError."""
        t = CommonTree(CommonToken(101))
        newChild = CommonTree(CommonToken(5))
        error = False
        try:
            t.replaceChildren(0, 0, newChild)
        except IndexError:
            error = True
        self.failUnless(error)
    def testReplaceWithOneChildren(self):
        """Replacing the only child swaps it in place."""
        # assume token type 99 and use text
        t = CommonTree(CommonToken(99, text="a"))
        c0 = CommonTree(CommonToken(99, text="b"))
        t.addChild(c0)
        newChild = CommonTree(CommonToken(99, text="c"))
        t.replaceChildren(0, 0, newChild)
        expecting = "(a c)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceInMiddle(self):
        """Replacing a middle child leaves its siblings untouched."""
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b")))
        t.addChild(CommonTree(CommonToken(99, text="c"))) # index 1
        t.addChild(CommonTree(CommonToken(99, text="d")))
        newChild = CommonTree(CommonToken(99, text="x"))
        t.replaceChildren(1, 1, newChild)
        expecting = "(a b x d)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceAtLeft(self):
        """Replacing the first child works."""
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b"))) # index 0
        t.addChild(CommonTree(CommonToken(99, text="c")))
        t.addChild(CommonTree(CommonToken(99, text="d")))
        newChild = CommonTree(CommonToken(99, text="x"))
        t.replaceChildren(0, 0, newChild)
        expecting = "(a x c d)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceAtRight(self):
        """Replacing the last child works."""
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b")))
        t.addChild(CommonTree(CommonToken(99, text="c")))
        t.addChild(CommonTree(CommonToken(99, text="d"))) # index 2
        newChild = CommonTree(CommonToken(99, text="x"))
        t.replaceChildren(2, 2, newChild)
        expecting = "(a b c x)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceOneWithTwoAtLeft(self):
        """Replacing the first child with a nil-rooted pair splices both in."""
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b")))
        t.addChild(CommonTree(CommonToken(99, text="c")))
        t.addChild(CommonTree(CommonToken(99, text="d")))
        newChildren = self.adaptor.nil()
        newChildren.addChild(CommonTree(CommonToken(99, text="x")))
        newChildren.addChild(CommonTree(CommonToken(99, text="y")))
        t.replaceChildren(0, 0, newChildren)
        expecting = "(a x y c d)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
    def testReplaceOneWithTwoAtRight(self):
        """Replacing the last child with a nil-rooted pair splices both in."""
        t = CommonTree(CommonToken(99, text="a"))
        t.addChild(CommonTree(CommonToken(99, text="b")))
        t.addChild(CommonTree(CommonToken(99, text="c")))
        t.addChild(CommonTree(CommonToken(99, text="d")))
        newChildren = self.adaptor.nil()
        newChildren.addChild(CommonTree(CommonToken(99, text="x")))
        newChildren.addChild(CommonTree(CommonToken(99, text="y")))
        t.replaceChildren(2, 2, newChildren)
        expecting = "(a b c x y)"
        self.failUnlessEqual(expecting, t.toStringTree())
        t.sanityCheckParentAndChildIndexes()
def testReplaceOneWithTwoInMiddle(self):
t = CommonTree(CommonToken(99, text="a"))
t.addChild(CommonTree(CommonToken(99, text="b")))
t.addChild(CommonTree(CommonToken(99, text="c")))
t.addChild(CommonTree(CommonToken(99, text="d")))
newChildren = self.adaptor.nil()
newChildren.addChild(CommonTree(CommonToken(99, text="x")))
newChildren.addChild(CommonTree(CommonToken(99, text="y")))
t.replaceChildren(1, 1, newChildren)
expecting = "(a b x y d)"
self.failUnlessEqual(expecting, t.toStringTree())
t.sanityCheckParentAndChildIndexes()
def testReplaceTwoWithOneAtLeft(self):
t = CommonTree(CommonToken(99, text="a"))
t.addChild(CommonTree(CommonToken(99, text="b")))
t.addChild(CommonTree(CommonToken(99, text="c")))
t.addChild(CommonTree(CommonToken(99, text="d")))
newChild = CommonTree(CommonToken(99, text="x"))
t.replaceChildren(0, 1, newChild)
expecting = "(a x d)"
self.failUnlessEqual(expecting, t.toStringTree())
t.sanityCheckParentAndChildIndexes()
def testReplaceTwoWithOneAtRight(self):
t = CommonTree(CommonToken(99, text="a"))
t.addChild(CommonTree(CommonToken(99, text="b")))
t.addChild(CommonTree(CommonToken(99, text="c")))
t.addChild(CommonTree(CommonToken(99, text="d")))
newChild = CommonTree(CommonToken(99, text="x"))
t.replaceChildren(1, 2, newChild)
expecting = "(a b x)"
self.failUnlessEqual(expecting, t.toStringTree())
t.sanityCheckParentAndChildIndexes()
def testReplaceAllWithOne(self):
t = CommonTree(CommonToken(99, text="a"))
t.addChild(CommonTree(CommonToken(99, text="b")))
t.addChild(CommonTree(CommonToken(99, text="c")))
t.addChild(CommonTree(CommonToken(99, text="d")))
newChild = CommonTree(CommonToken(99, text="x"))
t.replaceChildren(0, 2, newChild)
expecting = "(a x)"
self.failUnlessEqual(expecting, t.toStringTree())
t.sanityCheckParentAndChildIndexes()
def testReplaceAllWithTwo(self):
t = CommonTree(CommonToken(99, text="a"))
t.addChild(CommonTree(CommonToken(99, text="b")))
t.addChild(CommonTree(CommonToken(99, text="c")))
t.addChild(CommonTree(CommonToken(99, text="d")))
newChildren = self.adaptor.nil()
newChildren.addChild(CommonTree(CommonToken(99, text="x")))
newChildren.addChild(CommonTree(CommonToken(99, text="y")))
t.replaceChildren(0, 2, newChildren)
expecting = "(a x y)"
self.failUnlessEqual(expecting, t.toStringTree())
t.sanityCheckParentAndChildIndexes()
class TestTreeContext(unittest.TestCase):
    """Test the TreeParser.inContext() method"""

    tokenNames = [
        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "VEC", "ASSIGN", "PRINT",
        "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'"
    ]

    # Shared fixtures: each tree is paired with a wizard pattern whose %x
    # label binds the node whose ancestor chain the tests inspect.
    _ASSIGN_TREE = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3]))))"
    _ASSIGN_PATTERN = "(nil (ASSIGN ID[x] INT[3]) (PRINT (MULT ID (VEC INT %x:INT INT))))"
    _DEEP_TREE = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
    _DEEP_PATTERN = "(PRINT (MULT ID (VEC (MULT INT %x:INT) INT INT)))"

    def _boundNode(self, tree, pattern):
        """Parse `tree` against `pattern`; return (adaptor, node bound to %x).

        Asserts the pattern actually matched before handing back the node.
        """
        adaptor = CommonTreeAdaptor()
        wiz = TreeWizard(adaptor, self.tokenNames)
        t = wiz.create(tree)
        labels = {}
        valid = wiz.parse(t, pattern, labels)
        self.assertTrue(valid)
        return adaptor, labels.get("x")

    def _check(self, tree, pattern, context, expecting):
        """Assert that _inContext(node, context) returns `expecting`."""
        adaptor, node = self._boundNode(tree, pattern)
        found = TreeParser._inContext(adaptor, self.tokenNames, node, context)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(expecting, found)

    def _checkInvalid(self, context, message):
        """Assert that a malformed context string raises ValueError(message)."""
        adaptor, node = self._boundNode(self._DEEP_TREE, self._DEEP_PATTERN)
        try:
            TreeParser._inContext(adaptor, self.tokenNames, node, context)
            self.fail()
        # 'except E, v' is Python-2-only syntax; 'as' works on 2.6+ and 3.x.
        except ValueError as exc:
            self.assertEqual(message, str(exc))

    def testSimpleParent(self):
        self._check(self._ASSIGN_TREE, self._ASSIGN_PATTERN, "VEC", True)

    def testNoParent(self):
        # %x binds the PRINT root itself, so nothing is above it.
        self._check(
            "(PRINT (MULT ID[x] (VEC INT[1] INT[2] INT[3])))",
            "(%x:PRINT (MULT ID (VEC INT INT INT)))",
            "VEC", False)

    def testParentWithWildcard(self):
        self._check(self._ASSIGN_TREE, self._ASSIGN_PATTERN, "VEC ...", True)

    def testWildcardAtStartIgnored(self):
        self._check(self._ASSIGN_TREE, self._ASSIGN_PATTERN, "...VEC", True)

    def testWildcardInBetween(self):
        self._check(self._ASSIGN_TREE, self._ASSIGN_PATTERN, "PRINT...VEC", True)

    def testLotsOfWildcards(self):
        self._check(self._ASSIGN_TREE, self._ASSIGN_PATTERN,
                    "... PRINT ... VEC ...", True)

    def testDeep(self):
        self._check(self._DEEP_TREE, self._DEEP_PATTERN, "VEC ...", True)

    def testDeepAndFindRoot(self):
        self._check(self._DEEP_TREE, self._DEEP_PATTERN, "PRINT ...", True)

    def testDeepAndFindRoot2(self):
        self._check(self._DEEP_TREE, self._DEEP_PATTERN, "PRINT ... VEC ...", True)

    def testChain(self):
        self._check(self._DEEP_TREE, self._DEEP_PATTERN, "PRINT MULT VEC MULT", True)

    ## TEST INVALID CONTEXTS

    def testNotParent(self):
        self._check(self._DEEP_TREE, self._DEEP_PATTERN, "VEC", False)

    def testMismatch(self):
        ## missing MULT
        self._check(self._DEEP_TREE, self._DEEP_PATTERN, "PRINT VEC MULT", False)

    def testMismatch2(self):
        self._check(self._DEEP_TREE, self._DEEP_PATTERN, "PRINT VEC ...", False)

    def testMismatch3(self):
        self._check(self._DEEP_TREE, self._DEEP_PATTERN, "VEC ... VEC MULT", False)

    def testDoubleEtc(self):
        self._checkInvalid("PRINT ... ... VEC", "invalid syntax: ... ...")

    def testDotDot(self):
        self._checkInvalid("PRINT .. VEC", "invalid syntax: ..")
class TestTreeVisitor(unittest.TestCase):
    """Test of the TreeVisitor class."""

    tokenNames = [
        "<invalid>", "<EOR>", "<DOWN>", "<UP>", "VEC", "ASSIGN", "PRINT",
        "PLUS", "MULT", "DOT", "ID", "INT", "WS", "'['", "','", "']'"
    ]

    def testTreeVisitor(self):
        """visit() must invoke pre/post callbacks in depth-first order."""
        tree = "(PRINT (MULT ID[x] (VEC (MULT INT[9] INT[1]) INT[2] INT[3])))"
        adaptor = CommonTreeAdaptor()
        wiz = TreeWizard(adaptor, self.tokenNames)
        t = wiz.create(tree)

        found = []

        def pre(t):
            found.append("pre(%s)" % t)
            return t

        def post(t):
            found.append("post(%s)" % t)
            return t

        visitor = TreeVisitor(adaptor)
        visitor.visit(t, pre, post)

        expecting = ["pre(PRINT)", "pre(MULT)", "pre(x)", "post(x)",
                     "pre(VEC)", "pre(MULT)", "pre(9)", "post(9)", "pre(1)",
                     "post(1)", "post(MULT)", "pre(2)", "post(2)", "pre(3)",
                     "post(3)", "post(VEC)", "post(MULT)", "post(PRINT)"]
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(expecting, found)
if __name__ == "__main__":
    # Run the full suite with a verbose text runner when executed directly.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| bsd-3-clause |
2ndQuadrant/ansible | test/sanity/code-smell/shebang.py | 1 | 4826 | #!/usr/bin/env python
import os
import stat
import sys
def main():
    """Check shebang lines, executable bits and byte-order marks for each
    path given on the command line (or on stdin, one path per line),
    printing a ``path:line:col: message`` diagnostic for every violation.
    """
    # Shebangs accepted on ordinary (non-module, non-integration) files.
    standard_shebangs = set([
        b'#!/bin/bash -eu',
        b'#!/bin/bash -eux',
        b'#!/bin/sh',
        b'#!/usr/bin/env bash',
        b'#!/usr/bin/env fish',
        b'#!/usr/bin/env pwsh',
        b'#!/usr/bin/env python',
        b'#!/usr/bin/make -f',
    ])

    # Narrower set allowed under test/integration/targets/.
    integration_shebangs = set([
        b'#!/bin/sh',
        b'#!/usr/bin/env bash',
        b'#!/usr/bin/env python',
    ])

    # Required shebang per Ansible-module file extension.
    module_shebangs = {
        '': b'#!/usr/bin/python',
        '.py': b'#!/usr/bin/python',
        '.ps1': b'#!powershell',
    }

    # Paths exempted from all checks.
    skip = set([
        'test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps1',
        'test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps1',
        'test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps1',
        'test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps1',
        'test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1',
        'test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1',
        'test/utils/shippable/timing.py',
        'test/integration/targets/old_style_modules_posix/library/helloworld.sh',
        'test/integration/targets/gathering_facts/library/bogus_facts',
        # Python 3-only. Only run by release engineers
        'hacking/release-announcement.py',
    ])

    # see https://unicode.org/faq/utf_bom.html#bom1
    # Longest marks first so UTF-32 is not misdetected as UTF-16.
    byte_order_marks = (
        (b'\x00\x00\xFE\xFF', 'UTF-32 (BE)'),
        (b'\xFF\xFE\x00\x00', 'UTF-32 (LE)'),
        (b'\xFE\xFF', 'UTF-16 (BE)'),
        (b'\xFF\xFE', 'UTF-16 (LE)'),
        (b'\xEF\xBB\xBF', 'UTF-8'),
    )

    for path in sys.argv[1:] or sys.stdin.read().splitlines():
        if path in skip:
            continue

        with open(path, 'rb') as path_fd:
            # Only the first line matters for shebang validation.
            shebang = path_fd.readline().strip()
            mode = os.stat(path).st_mode
            # Any of the user/group/other execute bits counts as executable.
            executable = (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & mode

            if not shebang or not shebang.startswith(b'#!'):
                if executable:
                    print('%s:%d:%d: file without shebang should not be executable' % (path, 0, 0))

                # A BOM would hide a shebang from the kernel; report it.
                for mark, name in byte_order_marks:
                    if shebang.startswith(mark):
                        print('%s:%d:%d: file starts with a %s byte order mark' % (path, 0, 0, name))
                        break

                continue

            is_module = False
            is_integration = False

            if path.startswith('lib/ansible/modules/'):
                is_module = True
            elif path.startswith('lib/') or path.startswith('test/runner/lib/'):
                # Library code is imported, never executed directly.
                if executable:
                    print('%s:%d:%d: should not be executable' % (path, 0, 0))

                if shebang:
                    print('%s:%d:%d: should not have a shebang' % (path, 0, 0))

                continue
            elif path.startswith('test/integration/targets/'):
                is_integration = True

                dirname = os.path.dirname(path)

                if dirname.endswith('/library') or dirname in (
                        # non-standard module library directories
                        'test/integration/targets/module_precedence/lib_no_extension',
                        'test/integration/targets/module_precedence/lib_with_extension',
                ):
                    is_module = True

            if is_module:
                if executable:
                    print('%s:%d:%d: module should not be executable' % (path, 0, 0))

                ext = os.path.splitext(path)[1]
                expected_shebang = module_shebangs.get(ext)
                expected_ext = ' or '.join(['"%s"' % k for k in module_shebangs])

                if expected_shebang:
                    if shebang == expected_shebang:
                        continue

                    print('%s:%d:%d: expected module shebang "%s" but found: %s' % (path, 1, 1, expected_shebang, shebang))
                else:
                    print('%s:%d:%d: expected module extension %s but found: %s' % (path, 0, 0, expected_ext, ext))
            else:
                if is_integration:
                    allowed = integration_shebangs
                else:
                    allowed = standard_shebangs

                if shebang not in allowed:
                    print('%s:%d:%d: unexpected non-module shebang: %s' % (path, 1, 1, shebang))
if __name__ == '__main__':
    # Entry point used by the sanity-test harness.
    main()
| gpl-3.0 |
michaelgallacher/intellij-community | python/helpers/py3only/docutils/parsers/rst/languages/cs.py | 52 | 4781 | # $Id: cs.py 7119 2011-09-02 13:00:23Z milde $
# Author: Marek Blaha <mb@dat.cz>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Czech-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Czech translations of reStructuredText directive names; entries still
# marked "(translation required)" fall back to the English canonical name.
directives = {
      # language-dependent: fixed
      'pozor': 'attention',
      'caution (translation required)': 'caution', # jak rozlisit caution a warning?
      'code (translation required)': 'code',
      'nebezpe\u010D\u00ED': 'danger',
      'chyba': 'error',
      'rada': 'hint',
      'd\u016Fle\u017Eit\u00E9': 'important',
      'pozn\u00E1mka': 'note',
      'tip (translation required)': 'tip',
      'varov\u00E1n\u00ED': 'warning',
      'admonition (translation required)': 'admonition',
      'sidebar (translation required)': 'sidebar',
      't\u00E9ma': 'topic',
      'line-block (translation required)': 'line-block',
      'parsed-literal (translation required)': 'parsed-literal',
      'odd\u00EDl': 'rubric',
      'moto': 'epigraph',
      'highlights (translation required)': 'highlights',
      'pull-quote (translation required)': 'pull-quote',
      'compound (translation required)': 'compound',
      'container (translation required)': 'container',
      #'questions': 'questions',
      #'qa': 'questions',
      #'faq': 'questions',
      'table (translation required)': 'table',
      'csv-table (translation required)': 'csv-table',
      'list-table (translation required)': 'list-table',
      'math (translation required)': 'math',
      'meta (translation required)': 'meta',
      #'imagemap': 'imagemap',
      'image (translation required)': 'image', # obrazek
      'figure (translation required)': 'figure', # a tady?
      'include (translation required)': 'include',
      'raw (translation required)': 'raw',
      'replace (translation required)': 'replace',
      'unicode (translation required)': 'unicode',
      'datum': 'date',
      't\u0159\u00EDda': 'class',
      'role (translation required)': 'role',
      'default-role (translation required)': 'default-role',
      'title (translation required)': 'title',
      'obsah': 'contents',
      'sectnum (translation required)': 'sectnum',
      'section-numbering (translation required)': 'sectnum',
      'header (translation required)': 'header',
      'footer (translation required)': 'footer',
      #'footnotes': 'footnotes',
      #'citations': 'citations',
      'target-notes (translation required)': 'target-notes',
      'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Czech name to registered (in directives/__init__.py) directive name
mapping."""

# Czech translations of interpreted-text role names; untranslated entries
# keep the English canonical name with a "(translation required)" marker.
roles = {
    # language-dependent: fixed
    'abbreviation (translation required)': 'abbreviation',
    'ab (translation required)': 'abbreviation',
    'acronym (translation required)': 'acronym',
    'ac (translation required)': 'acronym',
    'code (translation required)': 'code',
    'index (translation required)': 'index',
    'i (translation required)': 'index',
    'subscript (translation required)': 'subscript',
    'sub (translation required)': 'subscript',
    'superscript (translation required)': 'superscript',
    'sup (translation required)': 'superscript',
    'title-reference (translation required)': 'title-reference',
    'title (translation required)': 'title-reference',
    't (translation required)': 'title-reference',
    'pep-reference (translation required)': 'pep-reference',
    'pep (translation required)': 'pep-reference',
    'rfc-reference (translation required)': 'rfc-reference',
    'rfc (translation required)': 'rfc-reference',
    'emphasis (translation required)': 'emphasis',
    'strong (translation required)': 'strong',
    'literal (translation required)': 'literal',
    'math (translation required)': 'math',
    'named-reference (translation required)': 'named-reference',
    'anonymous-reference (translation required)': 'anonymous-reference',
    'footnote-reference (translation required)': 'footnote-reference',
    'citation-reference (translation required)': 'citation-reference',
    'substitution-reference (translation required)': 'substitution-reference',
    'target (translation required)': 'target',
    'uri-reference (translation required)': 'uri-reference',
    'uri (translation required)': 'uri-reference',
    'url (translation required)': 'uri-reference',
    'raw (translation required)': 'raw',}
"""Mapping of Czech role names to canonical role names for interpreted text.
"""
| apache-2.0 |
celiafish/VisTrails | contrib/NumSciPy/ArrayOperations.py | 6 | 25493 | from core.modules.vistrails_module import Module, ModuleError
from Array import *
class ArrayOperationModule(object):
    # Mixin holding the VisTrails registry namespace shared by every
    # array-operation module defined in this file.
    my_namespace = 'numpy|array|operations'
class ArrayReshape(ArrayOperationModule, Module):
    """ Reshape the input array. The dimension sizes are presented
    and used to reshape the array. Please note that the total number
    of elements in the array must remain the same before and after
    reshaping. """
    def compute(self):
        a = self.get_input("Array")
        dims = self.get_input("Dims")
        # Gather the requested size of each output dimension from the
        # dim0..dimN input ports.
        newdims = [self.get_input("dim" + str(i)) for i in xrange(dims)]
        try:
            a.reshape(tuple(newdims))
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it while keeping the module error.
        except Exception:
            raise ModuleError("Could not assign new shape. Be sure the number of elements remains constant")
        self.set_output("Array Output", a.copy())
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, name="ReshapeArray", namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Dims", (basic.Integer, 'New Dimensionality'))
        reg.add_input_port(cls, "dim0", (basic.Integer, 'Dimension Size'))
        reg.add_input_port(cls, "dim1", (basic.Integer, 'Dimension Size'))
        reg.add_input_port(cls, "dim2", (basic.Integer, 'Dimension Size'))
        reg.add_input_port(cls, "dim3", (basic.Integer, 'Dimension Size'), True)
        reg.add_input_port(cls, "dim4", (basic.Integer, 'Dimension Size'), True)
        reg.add_input_port(cls, "dim5", (basic.Integer, 'Dimension Size'), True)
        reg.add_input_port(cls, "dim6", (basic.Integer, 'Dimension Size'), True)
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArrayCumulativeSum(ArrayOperationModule, Module):
    """ Compute the cumulative sum of the input array, returned as a
    flattened array of the same size where each element holds the
    running total up to that position. """
    def compute(self):
        source = self.get_input("Array")
        result = NDArray()
        result.set_array(source.cumulative_sum())
        self.set_output("Array Output", result)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArrayScalarMultiply(ArrayOperationModule, Module):
    """ Multiply every element of the input array by a scalar. """
    def compute(self):
        array_in = self.get_input("Array")
        scalar = self.get_input("Scalar")
        result = NDArray()
        result.set_array(array_in.get_array() * scalar)
        self.set_output("Array Output", result)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Scalar", (basic.Float, 'Input Scalar'))
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArraySort(ArrayOperationModule, Module):
    """ Sort the input array. By default, a flattened representation
    of the input array is used as input to a quicksort. Optional
    inputs are the axis in which to sort on and the type of sort to
    use.

    Sorting algorithms supported:
    quicksort - best average speed, unstable
    mergesort - needs additional working memory, stable
    heapsort - good worst-case performance, unstable
    """
    # NOTE: this text was previously a dangling string placed after
    # __init__, so it never became the class docstring; moved here.
    def __init__(self):
        Module.__init__(self)
        # Defaults mirror numpy.sort: last axis, quicksort, no field order.
        self.axis = -1
        self.kind = 'quicksort'
        self.order = None
    def compute(self):
        a = self.get_input("Array")
        if self.has_input("Axis"):
            self.axis = self.get_input("Axis")
        if self.has_input("Sort"):
            self.kind = self.get_input("Sort")
        if self.has_input("Order"):
            self.order = self.get_input("Order")
        b = a.sort_array(axis=self.axis, kind=self.kind, order=self.order)
        out = NDArray()
        out.set_array(b.copy())
        self.set_output("Sorted Array", out)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Axis", (basic.Integer, 'Axis to sort'), True)
        reg.add_input_port(cls, "Sort", (basic.String, 'Sort Algorithm'), True)
        reg.add_input_port(cls, "Order", (basic.Integer, 'Order'), True)
        reg.add_output_port(cls, "Sorted Array", (NDArray, 'Sorted Array'))
class ArrayCumulativeProduct(ArrayOperationModule, Module):
    """ Compute the cumulative product of the input array, returned as a
    flattened array of the same size where each element holds the
    running product up to that position. """
    def compute(self):
        source = self.get_input("Array")
        result = NDArray()
        result.set_array(source.cumulative_product())
        self.set_output("Array Output", result)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArrayFill(ArrayOperationModule, Module):
    """ Fill every element of the input array with the given value,
    defaulting to 0.0 when no value is supplied. """
    def compute(self):
        array_in = self.get_input("Array")
        fill_value = self.get_input("Value") if self.has_input("Value") else 0.
        array_in.fill_array(fill_value)
        self.set_output("Array Output", array_in)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, name="Fill Array", namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Value", (basic.Float, 'Value'))
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArrayResize(ArrayOperationModule, Module):
    """ Resize the input array. Unlike the ArrayReshape module,
    the number of elements of the array need not be conserved.
    If the shape is larger than the input array size, repeated
    copies of the input array will be copied to the resized version.
    If the shape is smaller, the input array will be cropped appropriately.
    """
    def compute(self):
        a = self.get_input("Array")
        dims = self.get_input("Dims")
        # Gather the requested size of each output dimension from the
        # dim0..dimN input ports.
        newdims = [self.get_input("dim" + str(i)) for i in xrange(dims)]
        try:
            b = a.resize(tuple(newdims))
            out = NDArray()
            out.set_array(b.copy())
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it while keeping the module error.
        except Exception:
            raise ModuleError("Could not assign new shape.")
        self.set_output("Array Output", out)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Dims", (basic.Integer, 'Output Dimensionality'))
        reg.add_input_port(cls, "dim0", (basic.Integer, 'Dimension Size'))
        reg.add_input_port(cls, "dim1", (basic.Integer, 'Dimension Size'))
        reg.add_input_port(cls, "dim2", (basic.Integer, 'Dimension Size'))
        reg.add_input_port(cls, "dim3", (basic.Integer, 'Dimension Size'), True)
        reg.add_input_port(cls, "dim4", (basic.Integer, 'Dimension Size'), True)
        reg.add_input_port(cls, "dim5", (basic.Integer, 'Dimension Size'), True)
        reg.add_input_port(cls, "dim6", (basic.Integer, 'Dimension Size'), True)
        reg.add_input_port(cls, "dim7", (basic.Integer, 'Dimension Size'), True)
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArrayExtractRegion(ArrayOperationModule, Module):
    """ Extract a rectangular region of the input array given, for each
    dimension, a (start, stop) index pair. """
    def compute(self):
        array_in = self.get_input("Array")
        num_dims = self.get_input("Dims")
        if num_dims > len(array_in.get_shape()):
            raise ModuleError("Output Dimensionality larger than Input Dimensionality")
        region = []
        for axis in xrange(num_dims):
            (start, stop) = self.get_input("dim" + str(axis))
            region.append(slice(start, stop))
        result = NDArray()
        # Direct subscription is equivalent to operator.__getitem__.
        result.set_array(array_in.get_array()[tuple(region)])
        self.set_output("Array Output", result)
    @classmethod
    def register(cls, reg, basic):
        l = [basic.Integer, basic.Integer]
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Dims", (basic.Integer, 'Output Dimensionality'))
        # dim0..dim7 all take an optional (start, stop) pair.
        for axis in xrange(8):
            reg.add_input_port(cls, "dim" + str(axis), l, True)
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArrayRavel(ArrayOperationModule, Module):
    """ Flatten the input array into a 1-D copy of its elements. """
    def compute(self):
        array_in = self.get_input("Array")
        flat = NDArray()
        flat.set_array(array_in.ravel().copy())
        self.set_output("Array Output", flat)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArrayRound(ArrayOperationModule, Module):
    """ Round every element to the requested number of decimal places;
    the default of 0 rounds to integers. """
    def __init__(self):
        Module.__init__(self)
        self.decimals = 0
    def compute(self):
        array_in = self.get_input("Array")
        if self.has_input("Decimals"):
            self.decimals = self.get_input("Decimals")
        result = NDArray()
        result.set_array(array_in.round(precision=self.decimals))
        self.set_output("Array Output", result)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Decimals", (basic.Integer, 'Precision'))
        reg.add_output_port(cls, "Array Output", (NDArray, 'Output Array'))
class ArrayGetSigma(ArrayOperationModule, Module):
    """ Compute the standard deviation of the array elements, optionally
    restricted to a single axis. """
    def __init__(self):
        Module.__init__(self)
        self.axis = None
    def compute(self):
        array_in = self.get_input("Array")
        if self.has_input("Axis"):
            self.axis = self.get_input("Axis")
        result = NDArray()
        result.set_array(array_in.get_standard_deviation(self.axis))
        self.set_output("Output Array", result)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, name="ArrayStandardDeviation", namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Axis", (basic.Integer, 'Axis'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArraySum(ArrayOperationModule, Module):
    """ Compute the sum over all elements of the input array. """
    def compute(self):
        array_in = self.get_input("Array")
        self.set_output("Array Sum", float(array_in.get_sum()))
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_output_port(cls, "Array Sum", (basic.Float, 'Sum'))
class ArrayElementMultiply(ArrayOperationModule, Module):
    """ Compute the element-wise product of two arrays. """
    def compute(self):
        lhs = self.get_input("Array1")
        rhs = self.get_input("Array2")
        product = NDArray()
        product.set_array(lhs.get_array() * rhs.get_array())
        self.set_output("Output Array", product)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array1", (NDArray, 'Input Array 1'))
        reg.add_input_port(cls, "Array2", (NDArray, 'Input Array 2'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArraySetElement(ArrayOperationModule, Module):
    """ Overwrite selected elements of the input array with the supplied
    value(s).  The module operates on a copy so the original array data
    is preserved. """
    def compute(self):
        source = self.get_input("Array")
        # Scalar ports win over their array counterparts when both connect.
        self.v = (self.get_input("Scalar Value")
                  if self.has_input("Scalar Value")
                  else self.get_input("Value Array"))
        self.ind = (self.get_input("Single Index")
                    if self.has_input("Single Index")
                    else self.get_input("Index Array"))
        modified = source.copy()
        modified.put(self.ind, self.v)
        result = NDArray()
        result.set_array(modified)
        self.set_output("Output Array", result)
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Scalar Value", (basic.Float, 'Value to Set'))
        reg.add_input_port(cls, "Value Array", (NDArray, 'Values to Set'))
        reg.add_input_port(cls, "Single Index", (basic.Integer, 'Index to Set'))
        reg.add_input_port(cls, "Index Array", (NDArray, 'Indexes to Set'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayVariance(ArrayOperationModule, Module):
    """ Compute the variance of the array elements, optionally along a
    single axis. """
    def compute(self):
        array_in = self.get_input("Array")
        if self.has_input("Axis"):
            variance = array_in.get_variance(axis=self.get_input("Axis"))
        else:
            variance = array_in.get_variance()
        self.set_output("Variance", float(variance))
    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Axis", (basic.Integer, 'Axis'))
        reg.add_output_port(cls, "Variance", (basic.Float, 'Variance'))
class ArrayTrace(ArrayOperationModule, Module):
    """ Calculate the trace of the input array.
    The input array must have at least rank 2.  The trace is taken
    on the diagonal given by the inputs Axis1 and Axis2 using the
    given Offset.  If these values are not supplied, they default to:
    Axis1 = 0
    Axis2 = 1
    Offset = 0
    """
    def compute(self):
        arr = self.get_input("Array")
        # Fall back to the documented defaults for any port left unset.
        self.axis1 = self.get_input("Axis1") if self.has_input("Axis1") else 0
        self.axis2 = self.get_input("Axis2") if self.has_input("Axis2") else 1
        self.offset = self.get_input("Offset") if self.has_input("Offset") else 0
        trace = arr.get_trace(self.offset, self.axis1, self.axis2)
        self.set_output("Trace", float(trace))

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Axis1", (basic.Integer, 'Axis 1'))
        reg.add_input_port(cls, "Axis2", (basic.Integer, 'Axis 2'))
        reg.add_input_port(cls, "Offset", (basic.Integer, 'Offset'))
        reg.add_output_port(cls, "Trace", (basic.Float, 'Array Trace'))
class ArraySwapAxes(ArrayOperationModule, Module):
    """ Create a new view of the input array with the
    given axes swapped.
    """
    def compute(self):
        src = self.get_input("Array")
        first_axis = self.get_input("Axis1")
        second_axis = self.get_input("Axis2")
        swapped = NDArray()
        # Copy so the output owns its data instead of aliasing the input.
        swapped.set_array(src.swap_axes(first_axis, second_axis).copy())
        self.set_output("Output Array", swapped)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Axis1", (basic.Integer, 'Axis 1'))
        reg.add_input_port(cls, "Axis2", (basic.Integer, 'Axis 2'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArraySqueeze(ArrayOperationModule, Module):
    """ Eliminate all length-1 dimensions in the input array. """
    def compute(self):
        source = self.get_input("Array")
        squeezed = NDArray()
        # squeeze() drops singleton axes; copy() detaches from the input.
        squeezed.set_array(source.get_array().squeeze().copy())
        self.set_output("Output Array", squeezed)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayAdd(ArrayOperationModule, Module):
    """ Add two arrays of the same size and shape """
    def compute(self):
        left = self.get_input("Array One").get_array()
        right = self.get_input("Array Two").get_array()
        # Shapes must agree exactly; broadcasting is deliberately rejected.
        if left.shape != right.shape:
            raise ModuleError("Cannot add arrays with different shapes")
        total = NDArray()
        total.set_array(left + right)
        self.set_output("Output Array", total)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array One", (NDArray, 'Input Array 1'))
        reg.add_input_port(cls, "Array Two", (NDArray, 'Input Array 2'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayScalarAdd(ArrayOperationModule, Module):
    """ Add a scalar value to each element of an array """
    # DOCFIX: the previous docstring was copy-pasted from ArrayAdd and
    # wrongly claimed this module adds two arrays.
    def compute(self):
        a1 = self.get_input("Array One").get_array()
        s = self.get_input("Scalar")
        out = NDArray()
        # numpy broadcasts the scalar across every element of the array.
        out.set_array(a1 + s)
        self.set_output("Output Array", out)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array One", (NDArray, 'Input Array 1'))
        reg.add_input_port(cls, "Scalar", (basic.Float, 'Scalar'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayLog10(ArrayOperationModule, Module):
    """ Take the base-10 log of each element in the input array """
    def compute(self):
        values = self.get_input("Array").get_array()
        logged = NDArray()
        logged.set_array(numpy.log10(values))
        self.set_output("Output Array", logged)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayAtan2(ArrayOperationModule, Module):
    """ Calculate the oriented arc-tangent of a vector stored as two arrays.
    Reals: Real components of complex vectors
    Imaginaries: Imaginary components of complex vectors
    """
    def compute(self):
        re_part = self.get_input("Reals").get_array()
        im_part = self.get_input("Imaginaries").get_array()
        angles = NDArray()
        # NOTE(review): numpy.arctan2(y, x) — the real components are passed
        # in the y slot here; argument order kept exactly as originally written.
        angles.set_array(numpy.arctan2(re_part, im_part))
        self.set_output("Output Array", angles)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Reals", (NDArray, 'Real Components'))
        reg.add_input_port(cls, "Imaginaries", (NDArray, 'Imaginary Components'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArraySqrt(ArrayOperationModule, Module):
    """ Calculate the element-wise square root of the input array """
    def compute(self):
        values = self.get_input("Input Array").get_array()
        roots = NDArray()
        roots.set_array(numpy.sqrt(values))
        self.set_output("Output Array", roots)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Input Array", (NDArray, 'Input Array'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayThreshold(ArrayOperationModule, Module):
    """ Threshold the array keeping only the values above the scalar value, v.
    Values at or below the threshold are replaced by the optional
    Replacement value (default 0). """
    def compute(self):
        in_ar = self.get_input("Input Array").get_array()
        v = self.get_input("Value")
        r = self.force_get_input("Replacement")
        # BUGFIX: use identity comparison with None.  `r == None` is
        # unreliable for objects that overload __eq__ (e.g. numpy arrays
        # compare element-wise), whereas `is None` tests absence exactly.
        if r is None:
            r = 0.
        out = NDArray()
        out.set_array(numpy.where(in_ar > v, in_ar, r))
        self.set_output("Output Array", out)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Input Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Value", (basic.Float, 'Threshold Value'))
        reg.add_input_port(cls, "Replacement", (basic.Float, 'Replacement Value'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayWindow(ArrayOperationModule, Module):
    """ Threshold the array from both above and below, keeping only
    the values within the window.  Values outside [Lower Bound,
    Upper Bound] are replaced by Replacement (default 0); unset bounds
    default to the array's min/max respectively. """
    def compute(self):
        in_ar = self.get_input("Input Array").get_array()
        lo = self.force_get_input("Lower Bound")
        hi = self.force_get_input("Upper Bound")
        r = self.force_get_input("Replacement")
        # BUGFIX: compare against None with `is`, not `==` — equality
        # comparison misbehaves for objects overloading __eq__ and is
        # against PEP 8 for singleton tests.
        if r is None:
            r = 0.
        if lo is None:
            lo = in_ar.min()
        if hi is None:
            hi = in_ar.max()
        out = NDArray()
        # Two passes: clamp below the lower bound, then above the upper.
        o = numpy.where(in_ar >= lo, in_ar, r)
        o = numpy.where(o <= hi, o, r)
        out.set_array(o)
        self.set_output("Output Array", out)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Input Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Lower Bound", (basic.Float, 'Lower Threshold Value'))
        reg.add_input_port(cls, "Upper Bound", (basic.Float, 'Upper Threshold Value'))
        reg.add_input_port(cls, "Replacement", (basic.Float, 'Replacement Value'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayNormalize(ArrayOperationModule, Module):
    """ Normalize the input array to the range [0, 1].
    If the Planes input is set and true, each plane along the first
    axis is normalized independently. """
    def compute(self):
        in_ar = self.get_input("Input Array").get_array()
        if self.force_get_input("Planes"):
            # Plane-wise: shift and scale each slice along axis 0 on its own.
            ar = numpy.zeros(in_ar.shape)
            for i in range(in_ar.shape[0]):
                p = in_ar[i] - in_ar[i].min()
                # NOTE(review): a constant plane makes p.max() == 0 and this
                # divides by zero — confirm inputs are never constant.
                ar[i] = p / p.max()
        else:
            # Whole-array normalization.  (FIX: the zeros buffer used to be
            # allocated unconditionally and then discarded on this path.)
            ar = in_ar - in_ar.min()
            ar = ar / ar.max()
        out = NDArray()
        out.set_array(ar)
        self.set_output("Output Array", out)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Input Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "Planes", (basic.Boolean, 'Plane-wise normalization'), True)
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class ArrayName(ArrayOperationModule, Module):
    """ Assign a name or label to the entries of an array """
    def compute(self):
        in_ar = self.get_input("Input Array")
        gen_name = self.force_get_input("Name")
        one_index = self.force_get_input("One Indexed")
        if gen_name:
            in_ar.set_name(gen_name, index=one_index)
        # Row names arrive as (index, name) pairs on a multi-value port.
        name_list = self.force_get_input_list("Row Name")
        # BUGFIX: identity test with None per PEP 8; `!= None` relies on
        # __ne__ and can misfire for overloaded types.
        if name_list is not None:
            for (i, n) in name_list:
                in_ar.set_row_name(n, i)
        dname = self.force_get_input("Domain Name")
        if dname:
            in_ar.set_domain_name(dname)
        rname = self.force_get_input("Range Name")
        if rname:
            in_ar.set_range_name(rname)
        # The input object is annotated in place and passed downstream.
        self.set_output("Output Array", in_ar)

    @classmethod
    def register(cls, reg, basic):
        reg.add_module(cls, namespace=cls.my_namespace)
        reg.add_input_port(cls, "Input Array", (NDArray, 'Input Array'))
        reg.add_input_port(cls, "One Indexed", (basic.Boolean, 'One Indexed'))
        reg.add_input_port(cls, "Name", (basic.String, 'Array Name'))
        reg.add_input_port(cls, "Row Name", [basic.Integer, basic.String], True)
        reg.add_input_port(cls, "Domain Name", (basic.String, 'Domain Label'))
        reg.add_input_port(cls, "Range Name", (basic.String, 'Range Label'))
        reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
| bsd-3-clause |
sylvinite/standalone_scripts | set_bioinforesponsible.py | 4 | 3296 |
""" Calls up the genologics LIMS directly in order to more quickly
set a bioinformatics responsible. Script can easily be altered
to be used to set other values."""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from genologics.lims import Lims
from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.entities import Udfconfig
def namesetter(PID):
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims.check_version()
#Find LIMS entry with same PID
allProjects = lims.get_projects()
for proj in allProjects:
if proj.id == PID:
limsproject = proj.name
break
#Error handling
if not 'limsproject' in locals():
print "{} not available in LIMS.".format(PID)
return None
#Enter project summary process
stepname=['Project Summary 1.3']
process=lims.get_processes(type=stepname, projectname=limsproject)
#Error handling
if process == []:
print "{} for {} is not available in LIMS.".format(stepname, limsproject)
return None
loop = True
while loop:
if "Bioinfo responsible" in process[0].udf:
response = process[0].udf["Bioinfo responsible"]
else:
response = "Unassigned"
print "Existing Bioinfo responsible for project {} aka {} is: {}".format(limsproject, PID, response.encode('utf-8'))
#Checks for valid name
in_responsibles = False
config_responsibles =Udfconfig(lims, id="1128")
while not in_responsibles:
newname = raw_input("Enter name of new Bioinfo responsible: ")
for names in config_responsibles.presets:
if newname in names:
in_responsibles = True
newname = names
if not in_responsibles:
print "Subset {} not found in accepted Bioinfo responsible list.".format(newname)
else:
print "Suggested name is {}".format(newname)
confirmation = raw_input("Project {} aka {} will have {} as new Bioinfo responsible, is this correct (Y/N)? ".format(limsproject, PID, newname))
if confirmation == 'Y' or confirmation == 'y':
try:
newname.decode('ascii')
process[0].udf["Bioinfo responsible"] = unicode(newname)
process[0].put()
print "Project {} aka {} assigned to {}".format(limsproject, PID, newname)
return None
except UnicodeDecodeError:
#Weird solution due to put function
process[0].udf["Bioinfo responsible"] = response
print "ERROR: You tried to use a special character, didn't you? Don't do that. New standards and stuff..."
elif confirmation == 'N' or confirmation == 'n':
loop = False
else:
print "Invalid answer."
looping = True
print "---- Bioinformatical (re)assignment application ----"
print "Connected to", BASEURI
while looping:
print ("---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ")
pid = raw_input("Enter the PID of the project you'd wish to (re)assign or Q to quit: ")
if pid != 'q' and pid != 'Q':
namesetter(pid)
else:
looping = False
| mit |
bricky/xbmc-addon-tvtumbler | resources/lib/unidecode/x074.py | 252 | 4696 | data = (
# Transliteration lookup table (unidecode style): data[n] is the ASCII
# romanization used for the character at offset n of this 256-entry block.
# NOTE(review): per the path above (unidecode/x074.py), entry n presumably
# maps code point U+74<n> — confirm against the unidecode package layout.
'Han ', # 0x00
'Xuan ', # 0x01
'Yan ', # 0x02
'Qiu ', # 0x03
'Quan ', # 0x04
'Lang ', # 0x05
'Li ', # 0x06
'Xiu ', # 0x07
'Fu ', # 0x08
'Liu ', # 0x09
'Ye ', # 0x0a
'Xi ', # 0x0b
'Ling ', # 0x0c
'Li ', # 0x0d
'Jin ', # 0x0e
'Lian ', # 0x0f
'Suo ', # 0x10
'Chiisai ', # 0x11
'[?] ', # 0x12
'Wan ', # 0x13
'Dian ', # 0x14
'Pin ', # 0x15
'Zhan ', # 0x16
'Cui ', # 0x17
'Min ', # 0x18
'Yu ', # 0x19
'Ju ', # 0x1a
'Chen ', # 0x1b
'Lai ', # 0x1c
'Wen ', # 0x1d
'Sheng ', # 0x1e
'Wei ', # 0x1f
'Dian ', # 0x20
'Chu ', # 0x21
'Zhuo ', # 0x22
'Pei ', # 0x23
'Cheng ', # 0x24
'Hu ', # 0x25
'Qi ', # 0x26
'E ', # 0x27
'Kun ', # 0x28
'Chang ', # 0x29
'Qi ', # 0x2a
'Beng ', # 0x2b
'Wan ', # 0x2c
'Lu ', # 0x2d
'Cong ', # 0x2e
'Guan ', # 0x2f
'Yan ', # 0x30
'Diao ', # 0x31
'Bei ', # 0x32
'Lin ', # 0x33
'Qin ', # 0x34
'Pi ', # 0x35
'Pa ', # 0x36
'Que ', # 0x37
'Zhuo ', # 0x38
'Qin ', # 0x39
'Fa ', # 0x3a
'[?] ', # 0x3b
'Qiong ', # 0x3c
'Du ', # 0x3d
'Jie ', # 0x3e
'Hun ', # 0x3f
'Yu ', # 0x40
'Mao ', # 0x41
'Mei ', # 0x42
'Chun ', # 0x43
'Xuan ', # 0x44
'Ti ', # 0x45
'Xing ', # 0x46
'Dai ', # 0x47
'Rou ', # 0x48
'Min ', # 0x49
'Zhen ', # 0x4a
'Wei ', # 0x4b
'Ruan ', # 0x4c
'Huan ', # 0x4d
'Jie ', # 0x4e
'Chuan ', # 0x4f
'Jian ', # 0x50
'Zhuan ', # 0x51
'Yang ', # 0x52
'Lian ', # 0x53
'Quan ', # 0x54
'Xia ', # 0x55
'Duan ', # 0x56
'Yuan ', # 0x57
'Ye ', # 0x58
'Nao ', # 0x59
'Hu ', # 0x5a
'Ying ', # 0x5b
'Yu ', # 0x5c
'Huang ', # 0x5d
'Rui ', # 0x5e
'Se ', # 0x5f
'Liu ', # 0x60
'Shi ', # 0x61
'Rong ', # 0x62
'Suo ', # 0x63
'Yao ', # 0x64
'Wen ', # 0x65
'Wu ', # 0x66
'Jin ', # 0x67
'Jin ', # 0x68
'Ying ', # 0x69
'Ma ', # 0x6a
'Tao ', # 0x6b
'Liu ', # 0x6c
'Tang ', # 0x6d
'Li ', # 0x6e
'Lang ', # 0x6f
'Gui ', # 0x70
'Zhen ', # 0x71
'Qiang ', # 0x72
'Cuo ', # 0x73
'Jue ', # 0x74
'Zhao ', # 0x75
'Yao ', # 0x76
'Ai ', # 0x77
'Bin ', # 0x78
'Tu ', # 0x79
'Chang ', # 0x7a
'Kun ', # 0x7b
'Zhuan ', # 0x7c
'Cong ', # 0x7d
'Jin ', # 0x7e
'Yi ', # 0x7f
'Cui ', # 0x80
'Cong ', # 0x81
'Qi ', # 0x82
'Li ', # 0x83
'Ying ', # 0x84
'Suo ', # 0x85
'Qiu ', # 0x86
'Xuan ', # 0x87
'Ao ', # 0x88
'Lian ', # 0x89
'Man ', # 0x8a
'Zhang ', # 0x8b
'Yin ', # 0x8c
'[?] ', # 0x8d
'Ying ', # 0x8e
'Zhi ', # 0x8f
'Lu ', # 0x90
'Wu ', # 0x91
'Deng ', # 0x92
'Xiou ', # 0x93
'Zeng ', # 0x94
'Xun ', # 0x95
'Qu ', # 0x96
'Dang ', # 0x97
'Lin ', # 0x98
'Liao ', # 0x99
'Qiong ', # 0x9a
'Su ', # 0x9b
'Huang ', # 0x9c
'Gui ', # 0x9d
'Pu ', # 0x9e
'Jing ', # 0x9f
'Fan ', # 0xa0
'Jin ', # 0xa1
'Liu ', # 0xa2
'Ji ', # 0xa3
'[?] ', # 0xa4
'Jing ', # 0xa5
'Ai ', # 0xa6
'Bi ', # 0xa7
'Can ', # 0xa8
'Qu ', # 0xa9
'Zao ', # 0xaa
'Dang ', # 0xab
'Jiao ', # 0xac
'Gun ', # 0xad
'Tan ', # 0xae
'Hui ', # 0xaf
'Huan ', # 0xb0
'Se ', # 0xb1
'Sui ', # 0xb2
'Tian ', # 0xb3
'[?] ', # 0xb4
'Yu ', # 0xb5
'Jin ', # 0xb6
'Lu ', # 0xb7
'Bin ', # 0xb8
'Shou ', # 0xb9
'Wen ', # 0xba
'Zui ', # 0xbb
'Lan ', # 0xbc
'Xi ', # 0xbd
'Ji ', # 0xbe
'Xuan ', # 0xbf
'Ruan ', # 0xc0
'Huo ', # 0xc1
'Gai ', # 0xc2
'Lei ', # 0xc3
'Du ', # 0xc4
'Li ', # 0xc5
'Zhi ', # 0xc6
'Rou ', # 0xc7
'Li ', # 0xc8
'Zan ', # 0xc9
'Qiong ', # 0xca
'Zhe ', # 0xcb
'Gui ', # 0xcc
'Sui ', # 0xcd
'La ', # 0xce
'Long ', # 0xcf
'Lu ', # 0xd0
'Li ', # 0xd1
'Zan ', # 0xd2
'Lan ', # 0xd3
'Ying ', # 0xd4
'Mi ', # 0xd5
'Xiang ', # 0xd6
'Xi ', # 0xd7
'Guan ', # 0xd8
'Dao ', # 0xd9
'Zan ', # 0xda
'Huan ', # 0xdb
'Gua ', # 0xdc
'Bo ', # 0xdd
'Die ', # 0xde
'Bao ', # 0xdf
'Hu ', # 0xe0
'Zhi ', # 0xe1
'Piao ', # 0xe2
'Ban ', # 0xe3
'Rang ', # 0xe4
'Li ', # 0xe5
'Wa ', # 0xe6
'Dekaguramu ', # 0xe7
'Jiang ', # 0xe8
'Qian ', # 0xe9
'Fan ', # 0xea
'Pen ', # 0xeb
'Fang ', # 0xec
'Dan ', # 0xed
'Weng ', # 0xee
'Ou ', # 0xef
'Deshiguramu ', # 0xf0
'Miriguramu ', # 0xf1
'Thon ', # 0xf2
'Hu ', # 0xf3
'Ling ', # 0xf4
'Yi ', # 0xf5
'Ping ', # 0xf6
'Ci ', # 0xf7
'Hekutogura ', # 0xf8
'Juan ', # 0xf9
'Chang ', # 0xfa
'Chi ', # 0xfb
'Sarake ', # 0xfc
'Dang ', # 0xfd
'Meng ', # 0xfe
'Pou ', # 0xff
)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.