repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
Distrotech/reportlab | src/rl_addons/rl_accel/tests/t3.py | 14 | 1431 | from reportlab.pdfbase.pdfmetrics import getFont
import time
from sys import getrefcount
font = getFont('Times-Roman')
_py_stringWidth = font._py_stringWidth
stringWidth = font.stringWidth
assert stringWidth!=_py_stringWidth
#print "font=%s(%d) widths=%s(%d)" % (
# hex(id(font)), getrefcount(font),
# hex(id(font.widths)), getrefcount(font.widths),
# )
utext = 'This is the end of the \xce\x91\xce\xb2 world. This is the end of the \xce\x91\xce\xb2 world jap=\xe3\x83\x9b\xe3\x83\x86. This is the end of the \xce\x91\xce\xb2 world. This is the end of the \xce\x91\xce\xb2 world jap=\xe3\x83\x9b\xe3\x83\x86'.decode('utf8')
print(stringWidth(utext,12))
print(_py_stringWidth(utext,12))
assert stringWidth(utext,12)==_py_stringWidth(utext,12)
def tim(N, msg, func, *args):
    """Time N repeated calls of func(*args).

    Returns a report string containing the label, the iteration count,
    the total wall-clock seconds, and the repr of the last result.
    """
    started = time.time()
    for _ in range(N):
        result = func(*args)
    elapsed = time.time() - started
    return "%s N=%d t=%.3f\n%r" % (msg, N, elapsed, result)
N=10000
print(tim(N,'_py_stringWidth',_py_stringWidth,utext,12))
print(tim(N,'stringWidth',stringWidth,utext,12))
utext='ABCDEFG'
N=100000
print(tim(N,'_py_stringWidth',_py_stringWidth,utext,12))
print(tim(N,'stringWidth',stringWidth,utext,12))
utext='ABCDEF\xce\xb2'
print(tim(N,'_py_stringWidth',_py_stringWidth,utext,12))
print(tim(N,'stringWidth',stringWidth,utext,12))
utext='ABCDE\xce\xb2G'
print(tim(N,'_py_stringWidth',_py_stringWidth,utext,12))
print(tim(N,'stringWidth',stringWidth,utext,12))
| bsd-3-clause |
JamesMura/yo_payments | setup.py | 1 | 1224 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
# Prefer setuptools; fall back to distutils on minimal environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Shortcut: `python setup.py publish` builds an sdist and uploads it.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

# The long description is the README followed by the changelog
# (with its reST marker stripped).
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

setup(
    name='yo_payments',
    version='0.1.6',
    description='Python Api Wrapper for the Yo Payments service',
    long_description=readme + '\n\n' + history,
    author='James Muranga',
    author_email='jmured@gmail.com',
    url='https://github.com/jamesmura/yo_payments',
    packages=[
        'yo_payments',
    ],
    package_dir={'yo_payments': 'yo_payments'},
    include_package_data=True,
    install_requires=[
        "requests", "xmltodict"
    ],
    license="MIT",
    zip_safe=False,
    keywords='yo_payments',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests'
)
| mit |
sachintaware/sublime-wakatime | packages/wakatime/packages/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober bundling all of the multi-byte charset probers."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # NOTE(review): the sequence presumably reflects probing
        # precedence inside CharSetGroupProber — confirm before reordering.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self._mProbers = [prober() for prober in prober_classes]
        self.reset()
| bsd-3-clause |
kalaidin/luigi | test/contrib/hadoop_jar_test.py | 20 | 2357 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
import tempfile
from helpers import unittest
from luigi.contrib.hadoop_jar import HadoopJarJobError, HadoopJarJobTask
from mock import patch, MagicMock
class TestHadoopJarJob(HadoopJarJobTask):
    """Minimal concrete jar task: the jar location is the `path` parameter."""

    path = luigi.Parameter()

    def jar(self):
        # Jar file that HadoopJarJobTask will hand to the hadoop runner.
        return self.path
class TestMissingJarJob(HadoopJarJobTask):
    """Task that never supplies a jar; running it should fail."""
    pass
class TestRemoteHadoopJarJob(TestHadoopJarJob):
    """Jar task run over SSH with a fully-specified remote config."""

    def ssh(self):
        return {"host": "myhost", "key_file": "file", "username": "user"}
class TestRemoteMissingJarJob(TestHadoopJarJob):
    """SSH config deliberately missing "username"; running it should fail."""

    def ssh(self):
        return {"host": "myhost", "key_file": "file"}
class HadoopJarJobTaskTest(unittest.TestCase):
    """Tests for HadoopJarJobTask; the hadoop invocation itself is mocked out."""

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_good(self, mock_job):
        # A task with an existing jar file runs without raising.
        mock_job.return_value = None
        with tempfile.NamedTemporaryFile() as jar_file:
            TestHadoopJarJob(jar_file.name).run()

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_missing_jar(self, mock_job):
        # No jar() override -> HadoopJarJobError.
        mock_job.return_value = None
        task = TestMissingJarJob()
        self.assertRaises(HadoopJarJobError, task.run)

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_remote_job(self, mock_job):
        # Complete SSH config -> remote run succeeds.
        mock_job.return_value = None
        with tempfile.NamedTemporaryFile() as jar_file:
            TestRemoteHadoopJarJob(jar_file.name).run()

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_remote_job_missing_config(self, mock_job):
        # SSH config lacking "username" -> HadoopJarJobError.
        mock_job.return_value = None
        with tempfile.NamedTemporaryFile() as jar_file:
            task = TestRemoteMissingJarJob(jar_file.name)
            self.assertRaises(HadoopJarJobError, task.run)
| apache-2.0 |
emon10005/sympy | sympy/printing/tests/test_jscode.py | 59 | 10228 | from sympy.core import pi, oo, symbols, Rational, Integer, GoldenRatio, EulerGamma, Catalan, Lambda, Dummy
from sympy.functions import Piecewise, sin, cos, Abs, exp, ceiling, sqrt
from sympy.utilities.pytest import raises
from sympy.printing.jscode import JavascriptCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
from sympy.matrices import Matrix, MatrixSymbol
from sympy import jscode
x, y, z = symbols('x,y,z')
def test_printmethod():
assert jscode(Abs(x)) == "Math.abs(x)"
def test_jscode_sqrt():
    """Square roots print as Math.sqrt however the power is spelled."""
    assert jscode(sqrt(x)) == "Math.sqrt(x)"
    assert jscode(x**0.5) == "Math.sqrt(x)"
    # The original third assertion duplicated the first one verbatim;
    # cover the rational-exponent spelling instead.
    assert jscode(x**Rational(1, 2)) == "Math.sqrt(x)"
def test_jscode_Pow():
g = implemented_function('g', Lambda(x, 2*x))
assert jscode(x**3) == "Math.pow(x, 3)"
assert jscode(x**(y**3)) == "Math.pow(x, Math.pow(y, 3))"
assert jscode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"Math.pow(3.5*2*x, -x + Math.pow(y, x))/(Math.pow(x, 2) + y)"
assert jscode(x**-1.0) == '1/x'
def test_jscode_constants_mathh():
assert jscode(exp(1)) == "Math.E"
assert jscode(pi) == "Math.PI"
assert jscode(oo) == "Number.POSITIVE_INFINITY"
assert jscode(-oo) == "Number.NEGATIVE_INFINITY"
def test_jscode_constants_other():
assert jscode(
2*GoldenRatio) == "var GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
assert jscode(2*Catalan) == "var Catalan = 0.915965594177219;\n2*Catalan"
assert jscode(
2*EulerGamma) == "var EulerGamma = 0.577215664901533;\n2*EulerGamma"
def test_jscode_Rational():
    """Rationals print as `p/q` fractions (reduced; sign on the numerator)."""
    cases = [
        ((3, 7), "3/7"),
        ((18, 9), "2"),
        ((3, -7), "-3/7"),
        ((-3, -7), "3/7"),
    ]
    for (p, q), expected in cases:
        assert jscode(Rational(p, q)) == expected
def test_jscode_Integer():
assert jscode(Integer(67)) == "67"
assert jscode(Integer(-1)) == "-1"
def test_jscode_functions():
assert jscode(sin(x) ** cos(x)) == "Math.pow(Math.sin(x), Math.cos(x))"
def test_jscode_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert jscode(g(x)) == "2*x"
g = implemented_function('g', Lambda(x, 2*x/Catalan))
assert jscode(g(x)) == "var Catalan = %s;\n2*x/Catalan" % Catalan.n()
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
assert jscode(g(A[i]), assign_to=A[i]) == (
"for (var i=0; i<n; i++){\n"
" A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
"}"
)
def test_jscode_exceptions():
assert jscode(ceiling(x)) == "Math.ceil(x)"
assert jscode(Abs(x)) == "Math.abs(x)"
def test_jscode_boolean():
    """Logical And/Or/Not map onto JavaScript's &&, || and ! operators."""
    cases = [
        (x & y, "x && y"),
        (x | y, "x || y"),
        (~x, "!x"),
        (x & y & z, "x && y && z"),
        (x | y | z, "x || y || z"),
        ((x & y) | z, "z || x && y"),
        ((x | y) & z, "z && (x || y)"),
    ]
    for expr, expected in cases:
        assert jscode(expr) == expected
def test_jscode_Piecewise():
expr = Piecewise((x, x < 1), (x**2, True))
p = jscode(expr)
s = \
"""\
((x < 1) ? (
x
)
: (
Math.pow(x, 2)
))\
"""
assert p == s
assert jscode(expr, assign_to="c") == (
"if (x < 1) {\n"
" c = x;\n"
"}\n"
"else {\n"
" c = Math.pow(x, 2);\n"
"}")
# Check that Piecewise without a True (default) condition error
expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
raises(ValueError, lambda: jscode(expr))
def test_jscode_Piecewise_deep():
p = jscode(2*Piecewise((x, x < 1), (x**2, True)))
s = \
"""\
2*((x < 1) ? (
x
)
: (
Math.pow(x, 2)
))\
"""
assert p == s
def test_jscode_settings():
raises(TypeError, lambda: jscode(sin(x), method="garbage"))
def test_jscode_Indexed():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o = symbols('n m o', integer=True)
i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
p = JavascriptCodePrinter()
p._not_c = set()
x = IndexedBase('x')[j]
assert p._print_Indexed(x) == 'x[j]'
A = IndexedBase('A')[i, j]
assert p._print_Indexed(A) == 'A[%s]' % (m*i+j)
B = IndexedBase('B')[i, j, k]
assert p._print_Indexed(B) == 'B[%s]' % (i*o*m+j*o+k)
assert p._not_c == set()
def test_jscode_loops_matrix_vector():
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (var i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' y[i] = x[j]*A[n*i + j] + y[i];\n'
' }\n'
'}'
)
c = jscode(A[i, j]*x[j], assign_to=y[i])
assert c == s
def test_dummy_loops():
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'for (var i_%(icount)i=0; i_%(icount)i<m_%(mcount)i; i_%(icount)i++){\n'
' y[i_%(icount)i] = x[i_%(icount)i];\n'
'}'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = jscode(x[i], assign_to=y[i])
assert code == expected
def test_jscode_loops_add():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (var i=0; i<m; i++){\n'
' y[i] = x[i] + z[i];\n'
'}\n'
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' y[i] = x[j]*A[n*i + j] + y[i];\n'
' }\n'
'}'
)
c = jscode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
assert c == s
def test_jscode_loops_multiple_contractions():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (var i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' for (var k=0; k<o; k++){\n'
' for (var l=0; l<p; l++){\n'
' y[i] = y[i] + b[%s]*a[%s];\n' % (j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = jscode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
assert c == s
def test_jscode_loops_addfactor():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (var i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' for (var k=0; k<o; k++){\n'
' for (var l=0; l<p; l++){\n'
' y[i] = (a[%s] + b[%s])*c[%s] + y[i];\n' % (i*n*o*p + j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l, j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = jscode((a[i, j, k, l] + b[i, j, k, l])*c[j, k, l], assign_to=y[i])
assert c == s
def test_jscode_loops_multiple_terms():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
s0 = (
'for (var i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
)
s1 = (
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' for (var k=0; k<o; k++){\n'
' y[i] = b[j]*b[k]*c[%s] + y[i];\n' % (i*n*o + j*o + k) +\
' }\n'
' }\n'
'}\n'
)
s2 = (
'for (var i=0; i<m; i++){\n'
' for (var k=0; k<o; k++){\n'
' y[i] = b[k]*a[%s] + y[i];\n' % (i*o + k) +\
' }\n'
'}\n'
)
s3 = (
'for (var i=0; i<m; i++){\n'
' for (var j=0; j<n; j++){\n'
' y[i] = b[j]*a[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}\n'
)
c = jscode(
b[j]*a[i, j] + b[k]*a[i, k] + b[j]*b[k]*c[i, j, k], assign_to=y[i])
assert (c == s0 + s1 + s2 + s3[:-1] or
c == s0 + s1 + s3 + s2[:-1] or
c == s0 + s2 + s1 + s3[:-1] or
c == s0 + s2 + s3 + s1[:-1] or
c == s0 + s3 + s1 + s2[:-1] or
c == s0 + s3 + s2 + s1[:-1])
def test_Matrix_printing():
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
assert jscode(mat, A) == (
"A[0] = x*y;\n"
"if (y > 0) {\n"
" A[1] = x + 2;\n"
"}\n"
"else {\n"
" A[1] = y;\n"
"}\n"
"A[2] = Math.sin(z);")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
assert jscode(expr) == (
"((x > 0) ? (\n"
" 2*A[2]\n"
")\n"
": (\n"
" A[2]\n"
")) + Math.sin(A[1]) + A[0]")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert jscode(m, M) == (
"M[0] = Math.sin(q[1]);\n"
"M[1] = 0;\n"
"M[2] = Math.cos(q[2]);\n"
"M[3] = q[1] + q[2];\n"
"M[4] = q[3];\n"
"M[5] = 5;\n"
"M[6] = 2*q[4]*1/q[1];\n"
"M[7] = 4 + Math.sqrt(q[0]);\n"
"M[8] = 0;")
| bsd-3-clause |
carhaas/cdec-semparse | python/cdec/sa/online.py | 5 | 4497 | from __future__ import division
import collections
import gzip
import itertools
from cdec.sa._sa import gzip_or_text
# Same as Cython implementation. Collisions with NULL in bitext?
NULL_WORD = 'NULL'
def learn_vocab(text_f):
    """Collect the set of distinct whitespace-separated tokens in text_f."""
    words = set()
    for line in gzip_or_text(text_f):
        words.update(line.strip().split())
    return words
def write_vocab(vocab, out_f):
    """Write the vocabulary to a gzipped file, one sorted word per line."""
    with gzip.open(out_f, 'wb') as out:
        out.writelines('{}\n'.format(word) for word in sorted(vocab))
def read_vocab(in_f):
    """Load a vocabulary file (one word per line) back into a set."""
    return {line.strip() for line in gzip_or_text(in_f)}
class Bilex:
    """Bilingual lexical translation table learned from word-aligned bitext.

    Keeps raw counts of source words (``f``), target words (``e``) and
    aligned pairs (``fe``), from which the lexical translation
    probabilities p(e|f) and p(f|e) are derived on demand.
    """

    def __init__(self, in_f=None):
        # defaultdict(int) makes every unseen entry an implicit zero count.
        self.f = collections.defaultdict(int)
        self.e = collections.defaultdict(int)
        self.fe = collections.defaultdict(int)
        if in_f:
            self.read(in_f)

    # Compatibility with Cython implementation
    def get_score(self, f, e, dir):
        """Return p(e|f) when dir == 0, p(f|e) when dir == 1."""
        if dir == 0:
            p = self.p_fe(f, e)
        elif dir == 1:
            p = self.p_ef(e, f)
        return p

    def p_fe(self, f, e):
        """p(e|f) = count(f, e) / count(f); None if either count is zero."""
        denom = self.f.get(f, None)
        if not denom:
            return None
        num = self.fe.get((f, e), None)
        if not num:
            return None
        return num / denom

    def p_ef(self, e, f):
        """p(f|e) = count(f, e) / count(e); None if either count is zero."""
        denom = self.e.get(e, None)
        if not denom:
            return None
        num = self.fe.get((f, e), None)
        if not num:
            return None
        return num / denom

    # Update counts from aligned sentence
    def update(self, f_words, e_words, links):
        """Add counts for one sentence pair; links are (f_index, e_index)."""
        covered_f = set()
        covered_e = set()
        for (i, j) in links:
            covered_f.add(i)
            covered_e.add(j)
            self.f[f_words[i]] += 1
            self.e[e_words[j]] += 1
            self.fe[(f_words[i], e_words[j])] += 1
        # Unaligned target words count as aligned to NULL (f->e direction).
        for j in range(len(e_words)):
            if j not in covered_e:
                self.f[NULL_WORD] += 1
                self.e[e_words[j]] += 1
                self.fe[(NULL_WORD, e_words[j])] += 1
        # Unaligned source words count as aligned to NULL (e->f direction).
        for i in range(len(f_words)):
            if i not in covered_f:
                self.f[f_words[i]] += 1
                self.e[NULL_WORD] += 1
                self.fe[(f_words[i], NULL_WORD)] += 1

    # Update counts from aligned bitext
    def add_bitext(self, alignment_f, text_f, target_text_f=None):
        """Accumulate counts over a whole aligned corpus.

        The bitext is either two parallel files or one 'src ||| tgt' file.
        NOTE: Python 2 only (itertools.izip, generator .next()).
        """
        if target_text_f:
            t = itertools.izip((line.strip() for line in gzip_or_text(text_f)),
                               (line.strip() for line in gzip_or_text(target_text_f)))
        else:
            t = (line.strip().split(' ||| ') for line in gzip_or_text(text_f))
        a = (line.strip() for line in gzip_or_text(alignment_f))
        for (source, target) in t:
            # Alignment links look like "0-1 2-2"; sort for determinism.
            links = sorted(tuple(int(link) for link in link_str.split('-'))
                           for link_str in a.next().split())
            self.update(source.split(), target.split(), links)

    def write(self, out_f):
        """Dump counts to a gzipped file: f, e, fe sections, blank-line-separated."""
        with gzip.open(out_f, 'wb') as out:
            for f in sorted(self.f):
                out.write('{} {}\n'.format(f, self.f[f]))
            out.write('\n')
            for e in sorted(self.e):
                out.write('{} {}\n'.format(e, self.e[e]))
            out.write('\n')
            for (f, e) in sorted(self.fe):
                out.write('{} {} {}\n'.format(f, e, self.fe[(f, e)]))
            out.write('\n')

    def read(self, in_f):
        """Load counts previously produced by write()."""
        with gzip_or_text(in_f) as inp:
            while True:
                line = inp.readline().strip()
                if not line:
                    break
                (w, c) = line.split()
                self.f[w] = int(c)
            while True:
                line = inp.readline().strip()
                if not line:
                    break
                (w, c) = line.split()
                self.e[w] = int(c)
            while True:
                line = inp.readline().strip()
                if not line:
                    break
                (f, e, c) = line.split()
                # BUG FIX: this used float(c).  write() emits integer counts
                # and f/e above are read back as ints, so a write/read round
                # trip silently turned fe counts (and any rewritten file)
                # into floats.  Read them as ints for consistency.
                self.fe[(f, e)] = int(c)
# Bilex get_score for multiple instances
def get_score_multilex(f, e, dir, bilex_list):
    """Pooled lexical score across several Bilex instances.

    Sums the raw counts from every table and divides once: dir == 0
    pools p(e|f), any other dir pools p(f|e).  Returns None when the
    pooled numerator or denominator is zero.
    """
    num = 0
    denom = 0
    for lex in bilex_list:
        denom += lex.f.get(f, 0) if dir == 0 else lex.e.get(e, 0)
        num += lex.fe.get((f, e), 0)
    if num and denom:
        return num / denom
    return None
| apache-2.0 |
ramcn/demo3 | venv/lib/python3.4/site-packages/django/db/backends/mysql/client.py | 520 | 1518 | import subprocess
from django.db.backends.base.client import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
    """Builds and launches the `mysql` command-line client for a connection."""

    executable_name = 'mysql'

    @classmethod
    def settings_to_cmd_args(cls, settings_dict):
        """Translate a Django DATABASES entry into mysql CLI arguments.

        Values under OPTIONS take precedence over the top-level settings.
        """
        options = settings_dict['OPTIONS']
        db = options.get('db', settings_dict['NAME'])
        user = options.get('user', settings_dict['USER'])
        passwd = options.get('passwd', settings_dict['PASSWORD'])
        host = options.get('host', settings_dict['HOST'])
        port = options.get('port', settings_dict['PORT'])
        cert = options.get('ssl', {}).get('ca')
        defaults_file = options.get('read_default_file')

        args = [cls.executable_name]
        # Seems to be no good way to set sql_mode with CLI.
        if defaults_file:
            args.append("--defaults-file=%s" % defaults_file)
        if user:
            args.append("--user=%s" % user)
        if passwd:
            # NOTE(review): the password is visible in the process list.
            args.append("--password=%s" % passwd)
        if host:
            # A slash means a local socket path rather than a network host.
            if '/' in host:
                args.append("--socket=%s" % host)
            else:
                args.append("--host=%s" % host)
        if port:
            args.append("--port=%s" % port)
        if cert:
            args.append("--ssl-ca=%s" % cert)
        if db:
            args.append(db)
        return args

    def runshell(self):
        """Spawn an interactive mysql shell for this connection's settings."""
        cmd_args = self.settings_to_cmd_args(self.connection.settings_dict)
        subprocess.call(cmd_args)
| mit |
kyroskoh/phantomjs | src/qt/qtwebkit/Source/ThirdParty/gtest/scripts/pump.py | 233 | 22879 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.1 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$\$.*'), '$$'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
    """A (line, column) position in a text file, ordered lexicographically."""

    def __init__(self, line=-1, column=-1):
        self.line = line
        self.column = column

    def __eq__(self, rhs):
        return (self.line, self.column) == (rhs.line, rhs.column)

    def __ne__(self, rhs):
        return not self == rhs

    def __lt__(self, rhs):
        # Earlier line wins; ties broken by column.
        return (self.line, self.column) < (rhs.line, rhs.column)

    def __le__(self, rhs):
        return self < rhs or self == rhs

    def __gt__(self, rhs):
        return rhs < self

    def __ge__(self, rhs):
        return rhs <= self

    def __str__(self):
        if self == Eof():
            return 'EOF'
        # Lines are displayed 1-based, columns 0-based.
        return '%s(%s)' % (self.line + 1, self.column)

    def __add__(self, offset):
        return Cursor(self.line, self.column + offset)

    def __sub__(self, offset):
        return Cursor(self.line, self.column - offset)

    def Clone(self):
        """Returns a copy of self."""
        return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
    """Return the sentinel Cursor (-1, -1) that denotes end-of-file."""
    return Cursor(line=-1, column=-1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
    """Returns True iff the given position in lines starts with 'string'."""
    tail = lines[pos.line][pos.column:]
    return tail.startswith(string)
def FindFirstInLine(line, token_table):
    """Find the leftmost regex match in line among the table's patterns.

    Returns (start, length, token_type) of the earliest match, or None
    when nothing matches.  Entries earlier in token_table win ties at
    the same start position.
    """
    best = None  # (start, length, token_type) of the leftmost match so far.
    for regex, token_type in token_table:
        m = regex.search(line)
        if m and (best is None or m.start() < best[0]):
            best = (m.start(), m.end() - m.start(), token_type)
    return best
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$$': # A meta comment.
if prev_token_rstripped:
yield prev_token_rstripped
pos = Cursor(found.end.line + 1, 0)
elif found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
lines = s.splitlines(True)
return TokenizeLines(lines, Cursor(0, 0))
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
    """Remove and return the first element of a_list (mutates in place)."""
    return a_list.pop(0)
def PushFront(a_list, elem):
    """Insert elem at the front of a_list (mutates in place)."""
    a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
# Pop and return the next token; when token_type is given, a mismatch is
# fatal (prints diagnostics and exits).  NOTE: Python 2 print statements.
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
    """Return the first token without removing it, or None if the list is empty."""
    return a_list[0] if a_list else None
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
  """Parses an optional $else / $elif continuation.

  Returns the else-branch CodeNode, or None when the next tokens do not
  form an else clause.  $elif recurses, nesting an IfNode inside a
  CodeNode so chains of $elif become nested if/else trees.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    # The remainder of the chain becomes this branch's own else.
    inner_else_node = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Whitespace-only code token between the if and its else: skip it.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one atomic AST node from the token stream.

  Dispatches on the first token's type; returns the parsed node, or None
  (after pushing the token back) when the stream does not start an atomic
  node -- e.g. at a closing ']]' handled by the caller.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)
  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    # Literal output text.
    return RawCodeNode(head)
  elif t == '$var':
    # $var id = exp  |  $var id = [[ code ]]
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    # $for id [sep] [[ body ]] -- the optional code token is the separator.
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    # Condition arrives as a 'code' token; else/elif parsed afterwards.
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    # $range id exp1..exp2
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # Strip the leading '$' and treat the rest as an identifier expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    # Bare nested block.
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not an atomic node: restore the token and signal the caller.
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Parses as many consecutive atomic nodes as possible into a CodeNode."""
  nodes = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if not node:
      break
    nodes.append(node)
  return CodeNode(nodes)
def Convert(file_path):
  """Reads the pump source at file_path and parses it into an AST.

  Returns the root CodeNode of the parsed token stream.
  """
  # open() instead of the deprecated file() builtin, inside a context
  # manager so the handle is closed promptly instead of leaking until
  # garbage collection.
  with open(file_path, 'r') as f:
    s = f.read()
  tokens = list(Tokenize(s))
  return ParseCodeNode(tokens)
class Env:
  """Evaluation environment: stacks of meta variables and ranges."""
  def __init__(self):
    # Both are LIFO lists of tuples; newest binding shadows older ones.
    self.variables = []
    self.ranges = []
  def Clone(self):
    """Returns a copy so nested scopes cannot leak bindings to the parent."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone
  def PushVariable(self, var, value):
    """Binds var to value; the binding shadows any earlier one."""
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]
  def PopVariable(self):
    """Removes the most recent variable binding."""
    self.variables[:1] = []
  def PushRange(self, var, lower, upper):
    """Binds var to the inclusive range [lower, upper]."""
    self.ranges[:0] = [(var, lower, upper)]
  def PopRange(self):
    """Removes the most recent range binding."""
    self.ranges[:1] = []
  def GetValue(self, identifier):
    """Returns the value bound to identifier; exits with an error if unbound."""
    for (var, value) in self.variables:
      if identifier == var:
        return value
    print 'ERROR: meta variable %s is undefined.' % (identifier,)
    sys.exit(1)
  def EvalExp(self, exp):
    """Evaluates an ExpNode's python_exp against this environment.

    Identifiers were rewritten to self.GetValue(...) by ParseExpNode, so
    eval resolves them through this Env.  Exits on evaluation failure.
    """
    try:
      result = eval(exp.python_exp)
    except Exception, e:
      print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
      print ('ERROR: failed to evaluate meta expression %s at %s' %
             (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result
  def GetRange(self, identifier):
    """Returns (lower, upper) bound to identifier; exits if unbound."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)
    print 'ERROR: range %s is undefined.' % (identifier,)
    sys.exit(1)
class Output:
  """Accumulates generated text in a single string buffer."""
  def __init__(self):
    # All text emitted so far.
    self.string = ''
  def GetLastLine(self):
    """Returns the text after the last newline, or '' if there is none."""
    newline_pos = self.string.rfind('\n')
    return '' if newline_pos < 0 else self.string[newline_pos + 1:]
  def Append(self, s):
    """Appends s to the buffer."""
    self.string = self.string + s
def RunAtomicCode(env, node, output):
  """Executes one AST node, appending generated text to output.

  Dispatches on the node's concrete type; exits the program on an
  unrecognized node.
  """
  if isinstance(node, VarNode):
    # Expand the definition body in a child scope, then bind the result.
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    # Iterates the range previously declared for this identifier.
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      # The separator goes between iterations, not after the last one.
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    print 'BAD'
    print node
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes each atomic node of code_node in order, writing to output."""
  for child in code_node.atomic_code:
    RunAtomicCode(env, child, output)
def IsComment(cur_line):
  """Returns True if cur_line contains a C++-style '//' comment marker."""
  return cur_line.find('//') >= 0
def IsInPreprocessorDirevative(prev_lines, cur_line):
  """Returns True if cur_line starts or continues a preprocessor directive.

  (The misspelled name is kept for compatibility with existing callers.)
  """
  if cur_line.lstrip().startswith('#'):
    return True
  # A trailing backslash on the previous output line continues a directive.
  return bool(prev_lines) and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a line containing a '//' comment to 80 columns.

  Code before the comment (if any) is emitted unchanged; the comment text
  is re-flowed onto as many '// '-prefixed lines as needed, aligned with
  either the comment's column or the code's indentation.
  """
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    # Whole-line comment: keep the comment's own column.
    indent = loc
  else:
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split on word boundaries so wrapping never breaks inside a word.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps a long code line to 80 columns, appending results to output.

  line_concat is appended to every wrapped (non-final) line -- e.g. ' \\'
  for preprocessor directives, '' for plain code.  Continuation lines get
  four extra spaces of indentation.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirevative(line, output):
  """Wraps a long preprocessor directive, continuing lines with ' \\'."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps a long plain code line with no line-continuation suffix."""
  WrapCode(line, '', output)
def IsHeaderGuardOrInclude(line):
  """Truthy for header-guard lines (#ifndef/#define/#endif //) and #include."""
  guard_match = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  return guard_match or re.match(r'^#include\s', line)
def WrapLongLine(line, output):
  """Appends line to output, wrapping it to 80 columns when needed.

  Chooses the wrapping strategy by line kind (comment, preprocessor
  directive, plain code); header guards and #include lines are exempt.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
  elif IsComment(line):
    if IsHeaderGuardOrInclude(line):
      # The style guide made an exception to allow long header guard lines
      # and includes.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirevative(output, line):
    if IsHeaderGuardOrInclude(line):
      # The style guide made an exception to allow long header guard lines
      # and includes.
      output.append(line)
    else:
      WrapPreprocessorDirevative(line, output)
  else:
    WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace; returns the result."""
  wrapped = []
  for source_line in string.splitlines():
    WrapLongLine(source_line, wrapped)
  return '\n'.join(l.rstrip() for l in wrapped) + '\n'
def main(argv):
  """Converts the .pump file named by the last argument.

  Output goes to a file with the '.pump' suffix stripped, or to stdout
  when the input name has no such suffix.
  """
  if len(argv) == 1:
    print __doc__
    sys.exit(1)
  file_path = argv[-1]
  ast = Convert(file_path)
  output = Output()
  RunCode(Env(), ast, output)
  output_str = BeautifyCode(output.string)
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    # No .pump suffix: fall back to writing to stdout.
    output_file_path = '-'
  if output_file_path == '-':
    print output_str,
  else:
    output_file = file(output_file_path, 'w')
    output_file.write('// This file was GENERATED by command:\n')
    output_file.write('// %s %s\n' %
                      (os.path.basename(__file__), os.path.basename(file_path)))
    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
    output_file.write(output_str)
    output_file.close()
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
QijunPan/ansible | lib/ansible/modules/web_infrastructure/django_manage.py | 31 | 11716 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
required: false
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
choices: [ "yes", "no" ]
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
- Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate)
required: false
version_added: "1.3"
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command
required: false
version_added: "1.3"
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command
required: false
version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python", for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage:
command: cleanup
app_path: "{{ django_dir }}"
# Load the initial_data fixture into the application
- django_manage:
command: loaddata
app_path: "{{ django_dir }}"
fixtures: "{{ initial_data }}"
# Run syncdb on the application
- django_manage:
command: syncdb
app_path: "{{ django_dir }}"
settings: "{{ settings_app_name }}"
pythonpath: "{{ settings_dir }}"
virtualenv: "{{ virtualenv_dir }}"
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage:
command: test
app_path: "{{ django_dir }}"
apps: main.SmokeTest
# Create an initial superuser.
- django_manage:
command: "createsuperuser --noinput --username=admin --email=admin@example.com"
app_path: "{{ django_dir }}"
"""
import os
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(os.path.expanduser(venv_param), 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
vcmd = '%s %s' % (virtualenv, venv_param)
vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
    """True unless the line reports that the cache table already exists."""
    return line.find("Already exists") == -1
def flush_filter_output(line):
    """True for lines reporting that at least one object was installed."""
    if "Installed" not in line:
        return False
    return "Installed 0 object" not in line
def loaddata_filter_output(line):
    """True for lines reporting that at least one fixture object loaded."""
    return "Installed" in line and not ("Installed 0 object" in line)
def syncdb_filter_output(line):
    """True when syncdb created a table or installed at least one object."""
    if "Creating table " in line:
        return True
    return "Installed" in line and "Installed 0 object" not in line
def migrate_filter_output(line):
    """True when migrate did real work (forward migration, install, apply)."""
    if "Migrating forwards " in line or "Applying" in line:
        return True
    return "Installed" in line and "Installed 0 object" not in line
def collectstatic_filter_output(line):
    """Truthy for non-empty lines unless they report '0 static files'."""
    if not line:
        # Preserve the original's short-circuit: falsy input is returned as-is.
        return line
    return "0 static files" not in line
def main():
    """Entry point: validate params, build the manage.py command, run it.

    Exits via module.exit_json / module.fail_json as an Ansible module.
    """
    # Per-command whitelist of the 'specific' parameters each accepts.
    command_allowed_param_map = dict(
        cleanup=(),
        createcachetable=('cache_table', 'database', ),
        flush=('database', ),
        loaddata=('database', 'fixtures', ),
        syncdb=('database', ),
        test=('failfast', 'testrunner', 'liveserver', 'apps', ),
        validate=(),
        migrate=('apps', 'skip', 'merge', 'database',),
        collectstatic=('clear', 'link', ),
    )
    command_required_param_map = dict(
        loaddata=('fixtures', ),
    )
    # forces --noinput on every command that needs it
    noinput_commands = (
        'flush',
        'syncdb',
        'migrate',
        'test',
        'collectstatic',
    )
    # These params are allowed for certain commands only
    specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
    # These params are automatically added to the command if present
    general_params = ('settings', 'pythonpath', 'database',)
    specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
    end_of_command_params = ('apps', 'cache_table', 'fixtures')
    module = AnsibleModule(
        argument_spec=dict(
            command = dict(default=None, required=True),
            app_path = dict(default=None, required=True),
            settings = dict(default=None, required=False),
            pythonpath = dict(default=None, required=False, aliases=['python_path']),
            virtualenv = dict(default=None, required=False, aliases=['virtual_env']),
            apps = dict(default=None, required=False),
            cache_table = dict(default=None, required=False),
            clear = dict(default=None, required=False, type='bool'),
            database = dict(default=None, required=False),
            failfast = dict(default='no', required=False, type='bool', aliases=['fail_fast']),
            fixtures = dict(default=None, required=False),
            liveserver = dict(default=None, required=False, aliases=['live_server']),
            testrunner = dict(default=None, required=False, aliases=['test_runner']),
            skip = dict(default=None, required=False, type='bool'),
            merge = dict(default=None, required=False, type='bool'),
            link = dict(default=None, required=False, type='bool'),
        ),
    )
    command = module.params['command']
    app_path = os.path.expanduser(module.params['app_path'])
    virtualenv = module.params['virtualenv']
    # Reject specific params that the chosen command does not accept.
    for param in specific_params:
        value = module.params[param]
        if param in specific_boolean_params:
            value = module.boolean(value)
        if value and param not in command_allowed_param_map[command]:
            module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
    # Enforce params that the chosen command requires.
    for param in command_required_param_map.get(command, ()):
        if not module.params[param]:
            module.fail_json(msg='%s param is required for command=%s' % (param, command))
    _ensure_virtualenv(module)
    # Assemble the manage.py command line piece by piece.
    cmd = "./manage.py %s" % (command, )
    if command in noinput_commands:
        cmd = '%s --noinput' % cmd
    for param in general_params:
        if module.params[param]:
            cmd = '%s --%s=%s' % (cmd, param, module.params[param])
    for param in specific_boolean_params:
        if module.boolean(module.params[param]):
            cmd = '%s --%s' % (cmd, param)
    # these params always get tacked on the end of the command
    for param in end_of_command_params:
        if module.params[param]:
            cmd = '%s %s' % (cmd, module.params[param])
    rc, out, err = module.run_command(cmd, cwd=os.path.expanduser(app_path))
    if rc != 0:
        # createcachetable on an existing table is treated as success.
        if command == 'createcachetable' and 'table' in err and 'already exists' in err:
            out = 'Already exists.'
        else:
            if "Unknown command:" in err:
                _fail(module, cmd, err, "Unknown django command: %s" % command)
            _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
    changed = False
    lines = out.split('\n')
    # <command>_filter_output, if defined, decides which lines mean "changed".
    filt = globals().get(command + "_filter_output", None)
    if filt:
        filtered_output = list(filter(filt, lines))
        if len(filtered_output):
            changed = filtered_output
    module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
                     settings=module.params['settings'], pythonpath=module.params['pythonpath'])
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
gautelinga/BERNAISE | utilities/units.py | 1 | 3837 | '''
This converts the simulation parmatetor into dimensionless quantiteis and
give is the physical uints that the feilds should be scaled whit.
'''
# import external packs
import dolfin as df
import h5py
import os
import glob
import numpy as np
import sys
# import internal BERNAISE packs
# Find path to the BERNAISE root folder
bernaise_path = "/" + os.path.join(*os.path.realpath(__file__).split("/")[:-2])
# ...and append it to sys.path to get functionality from BERNAISE
sys.path.append(bernaise_path)
from utilities.generate_mesh import numpy_to_dolfin
from mpi4py.MPI import COMM_WORLD
from utilities.plot import plot_contour, plot_edges, plot_quiver, plot_faces,\
zero_level_set, plot_probes, plot_fancy
from utilities.generate_mesh import line_points
from common import load_parameters, info, parse_command_line, makedirs_safe, \
info_blue, info_cyan, info_split, info_on_red, info_red, info_yellow, \
parse_xdmf, info_warning
__author__ = "Asger Bolet"
__methods__ = ["dimensionless_numbers"]
__all__ = [] + __methods__
class LoadSettings:
    """ Class for loading timeseries """
    def __init__(self, folder):
        self.folder = folder
        self.settings_folder = os.path.join(folder, "Settings")
        self.params_prefix = os.path.join(self.settings_folder,
                                          "parameters_from_tstep_")
        self.params_suffix = ".dat"
        self.parameters = dict()
        self._get_model_parameters()
    def _get_model_parameters(self):
        """ Load the newest parameter file and cache relevant entries. """
        # Each matching file overwrites self.parameters; the last one wins.
        for params_file in glob.glob(
                self.params_prefix + "*" + self.params_suffix):
            parameters = dict()
            load_parameters(parameters, params_file)
            self.parameters = parameters
        # The active parts of the solvers.
        # BUGFIX: these lookups previously used bare names (enable_NS, ...)
        # instead of string keys, which raised NameError at runtime.
        self.enable_NS = self.parameters["enable_NS"]
        self.enable_PF = self.parameters["enable_PF"]
        self.enable_EC = self.parameters["enable_EC"]
        # Extracting important parameters
        self.Lx = self.parameters["Lx"]
        self.Ly = self.parameters["Ly"]
        # Model units, all normalized to 1: temperature, Boltzmann constant,
        # vacuum permittivity and elementary charge.  (Attribute names
        # 'temperatur' / 'varcum_permittivity' are kept as-is since other
        # code may already reference them.)
        self.temperatur = 1
        self.k_b = 1
        self.varcum_permittivity = 1
        self.q_e = 1
        if self.enable_PF:
            self.interface_thickness = self.parameters["interface_thickness"]
            self.pf_mobility_coeff = self.parameters["pf_mobility_coeff"]
            self.surface_tension = self.parameters["surface_tension"]
        if self.enable_EC:
            self.permittivity = self.parameters["permittivity"]
            self.solutes = self.parameters["solutes"]
        if self.enable_NS:
            self.density = self.parameters["density"]
            self.viscosity = self.parameters["viscosity"]
#SimulationParameters
def main():
    """Entry point: parse command-line arguments and load the given folder.

    NOTE(review): get_help() is not defined or imported in this file --
    presumably it should come from utilities.plot or common; confirm.
    Method dispatch below is still commented out (work in progress).
    """
    info_yellow("BERNAISE: Unit-conversion tool")
    info_warning("Work in progress!")
    cmd_kwargs = parse_command_line()
    # Get help if it was called for.
    if cmd_kwargs.get("help", False):
        get_help()
    folder = cmd_kwargs.get("folder", False)
    if folder:
        LoadSettings(folder)
    # Call the specified method
    #if method in __methods__:
    #    globals()[method](ts, **cmd_kwargs)
    #else:
    #    info_on_red("The specified conversion method doesn't exist.")
if __name__ == "__main__":
main()
| mit |
cjaymes/pyscap | src/scap/model/oval_5/defs/StateType.py | 1 | 1565 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
from scap.model.oval_5 import OPERATOR_ENUMERATION
logger = logging.getLogger(__name__)
class StateType(Model):
    """Base class for OVAL 5 state definitions.

    MODEL_MAP declaratively describes the XML schema for the generic
    (de)serialization machinery in scap.Model: which child elements are
    allowed and which attributes the state element carries.
    """
    MODEL_MAP = {
        'elements': [
            # Optional XML digital signature and OVAL notes children.
            {'xmlns': 'http://www.w3.org/2000/09/xmldsig#', 'tag_name': 'Signature', 'min': 0, 'max': 1},
            {'xmlns': 'http://oval.mitre.org/XMLSchema/oval-common-5', 'tag_name': 'notes', 'class': 'NotesType', 'min': 0, 'max': 1},
        ],
        'attributes': {
            'id': {'type': 'scap.model.oval_5.StateIdPattern', 'required': True},
            'version': {'type': 'NonNegativeIntegerType', 'required': True},
            # How multiple entity checks combine (AND/OR/...).
            'operator': {'enum': OPERATOR_ENUMERATION, 'default': 'AND'},
            'comment': {'type': 'scap.model.oval_5.NonEmptyString'}, # required in the spec
            'deprecated': {'type': 'BooleanType', 'default': False},
        }
    }
| gpl-3.0 |
joejulian/openstack-guest-agents-unix | commands/password.py | 4 | 9612 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
JSON password reset handling plugin
"""
import base64
import binascii
import logging
import os
import subprocess
import time
from Crypto.Cipher import AES
import agentlib
import commands
# This is to support older python versions that don't have hashlib
try:
import hashlib
except ImportError:
import md5
class hashlib(object):
"""Fake hashlib module as a class"""
@staticmethod
def md5():
return md5.new()
class PasswordError(Exception):
    """Exception carrying a (ResponseCode, ResponseMessage) response tuple."""
    def __init__(self, response):
        # 'response' is a (ResponseCode, ResponseMessage) tuple.
        self.response = response
    def __str__(self):
        code, message = self.response
        return "%s: %s" % (code, message)
    def get_response(self):
        """Returns the (code, message) tuple this error was raised with."""
        return self.response
class PasswordCommands(commands.CommandBase):
    """
    Class for password related commands

    Implements a Diffie-Hellman key exchange ('keyinit') followed by an
    AES-CBC encrypted password reset ('password').
    """
    def __init__(self, *args, **kwargs):
        # prime to use
        self.prime = 162259276829213363391578010288127
        self.base = 5
        self.kwargs = {}
        self.kwargs.update(kwargs)
    def _mod_exp(self, num, exp, mod):
        """Computes (num ** exp) % mod by square-and-multiply."""
        result = 1
        while exp > 0:
            if (exp & 1) == 1:
                result = (result * num) % mod
            exp = exp >> 1
            num = (num * num) % mod
        return result
    def _make_private_key(self):
        """
        Create a private key using /dev/urandom
        """
        # 16 random bytes -> 128-bit integer.
        return int(binascii.hexlify(os.urandom(16)), 16)
    def _dh_compute_public_key(self, private_key):
        """
        Given a private key, compute a public key
        """
        return self._mod_exp(self.base, private_key, self.prime)
    def _dh_compute_shared_key(self, public_key, private_key):
        """
        Given public and private keys, compute the shared key
        """
        return self._mod_exp(public_key, private_key, self.prime)
    def _compute_aes_key(self, key):
        """
        Given a key, compute the corresponding key that can be used
        with AES

        Returns an (aes_key, aes_iv) pair derived via MD5.
        """
        m = hashlib.md5()
        m.update(key)
        aes_key = m.digest()
        # IV is derived from the key material as well.
        m = hashlib.md5()
        m.update(aes_key)
        m.update(key)
        aes_iv = m.digest()
        return (aes_key, aes_iv)
    def _decrypt_password(self, aes_key, data):
        """Decrypts AES-CBC data and strips the trailing padding bytes."""
        aes = AES.new(aes_key[0], AES.MODE_CBC, aes_key[1])
        passwd = aes.decrypt(data)
        # Last byte encodes the pad length; reject implausible values.
        cut_off_sz = ord(passwd[len(passwd) - 1])
        if cut_off_sz > 16 or len(passwd) < 16:
            raise PasswordError((500, "Invalid password data received"))
        passwd = passwd[: - cut_off_sz]
        return passwd
    def _decode_password(self, data):
        """Base64-decodes and decrypts a password payload.

        Raises PasswordError if decoding fails or no key exchange has
        happened yet.
        """
        try:
            real_data = base64.b64decode(data)
        except Exception:
            raise PasswordError((500, "Couldn't decode base64 data"))
        try:
            aes_key = self.aes_key
        except AttributeError:
            raise PasswordError((500, "Password without key exchange"))
        try:
            passwd = self._decrypt_password(aes_key, real_data)
        except PasswordError, e:
            raise e
        except Exception, e:
            raise PasswordError((500, str(e)))
        return passwd
    def _change_password(self, passwd):
        """Actually change the password"""
        if self.kwargs.get('testmode', False):
            return None
        # Make sure there are no newlines at the end
        set_password('root', passwd.strip('\n'))
    def _wipe_key(self):
        """
        Remove key from a previous keyinit command
        """
        try:
            del self.aes_key
        except AttributeError:
            pass
    @commands.command_add('keyinit')
    def keyinit_cmd(self, data):
        """Performs our half of the DH exchange; returns our public key."""
        # Remote pubkey comes in as large number
        # Or well, it should come in as a large number.  It's possible
        # that some legacy client code will send it as a string.  So,
        # we'll make sure to always convert it to long.
        remote_public_key = long(data)
        my_private_key = self._make_private_key()
        my_public_key = self._dh_compute_public_key(my_private_key)
        shared_key = str(self._dh_compute_shared_key(remote_public_key,
                                                     my_private_key))
        self.aes_key = self._compute_aes_key(shared_key)
        # The key needs to be a string response right now
        return ("D0", str(my_public_key))
    @commands.command_add('password')
    def password_cmd(self, data):
        """Decrypts the payload, sets root's password, wipes the session key."""
        try:
            passwd = self._decode_password(data)
            self._change_password(passwd)
        except PasswordError, e:
            return e.get_response()
        # One-shot key: always discard after a password attempt.
        self._wipe_key()
        return (0, "")
def _make_salt(length):
"""Create a salt of appropriate length"""
salt_chars = 'abcdefghijklmnopqrstuvwxyz'
salt_chars += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
salt_chars += '0123456789./'
rand_data = os.urandom(length)
salt = ''
for c in rand_data:
salt += salt_chars[ord(c) % len(salt_chars)]
return salt
def _create_temp_password_file(user, password, filename):
    """Read original passwd file, generating a new temporary file.

    The temporary file is created beside 'filename' with the same mode and
    ownership, containing the original contents with 'user's password hash
    replaced by a fresh hash of 'password'.

    Returns: The temporary filename
    """
    with open(filename) as f:
        file_data = f.readlines()
    stat_info = os.stat(filename)
    tmpfile = '%s.tmp.%d' % (filename, os.getpid())
    # We have to use os.open() so that we can create the file with
    # the appropriate modes. If we create it and set modes later,
    # there's a small point of time where a non-root user could
    # potentially open the file and wait for data to be written.
    fd = os.open(tmpfile,
                 os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                 stat_info.st_mode)
    f = None
    success = False
    try:
        os.chown(tmpfile, stat_info.st_uid, stat_info.st_gid)
        f = os.fdopen(fd, 'w')
        for line in file_data:
            if line.startswith('#'):
                # Comment line: copy through unchanged.
                f.write(line)
                continue
            try:
                (s_user, s_password, s_rest) = line.split(':', 2)
            except ValueError:
                # Malformed entry: copy through unchanged.
                f.write(line)
                continue
            if s_user != user:
                f.write(line)
                continue
            if s_password.startswith('$'):
                # Format is '$ID$SALT$HASH' where ID defines the
                # encryption type. We'll re-use that, and make a salt
                # that's the same size as the old
                salt_data = s_password[1:].split('$')
                salt = '$%s$%s$' % (salt_data[0],
                                    _make_salt(len(salt_data[1])))
            else:
                # Default to MD5 as a minimum level of compatibility
                salt = '$1$%s$' % _make_salt(8)
            enc_pass = agentlib.encrypt_password(password, salt)
            f.write("%s:%s:%s" % (s_user, enc_pass, s_rest))
        f.close()
        f = None
        success = True
    except Exception as e:
        logging.error("Couldn't create temporary password file: %s" % str(e))
        raise
    finally:
        if not success:
            # BUGFIX: this branch previously unlinked the temp file while
            # the handle was still open (leaking the descriptor) and then
            # unlinked it a second time.  Close the handle first, then
            # remove the file once.
            if f:
                try:
                    f.close()
                except Exception:
                    pass
            try:
                os.unlink(tmpfile)
            except Exception:
                pass
    return tmpfile
def set_password(user, password):
    """Set the password for a particular user

    Tries the platform's password database in turn: /etc/shadow (Linux,
    replaced atomically via rename) or /etc/master.passwd (BSD, rebuilt
    with pwd_mkdb).  Raises PasswordError if neither format is found or
    the database rebuild fails.
    """
    INVALID = 0  # NOTE(review): unused sentinel; kept for compatibility.
    PWD_MKDB = 1
    RENAME = 2
    files_to_try = {'/etc/shadow': RENAME,
                    '/etc/master.passwd': PWD_MKDB}
    for filename, ftype in files_to_try.iteritems():
        if not os.path.exists(filename):
            continue
        tmpfile = _create_temp_password_file(user, password, filename)
        if ftype == RENAME:
            # Swap the new file in, keeping a backup until the swap is done.
            bakfile = '/etc/shadow.bak.%d' % os.getpid()
            os.rename(filename, bakfile)
            os.rename(tmpfile, filename)
            os.remove(bakfile)
            return
        if ftype == PWD_MKDB:
            # BSD: let pwd_mkdb validate and install the new database.
            pipe = subprocess.PIPE
            p = subprocess.Popen(['/usr/sbin/pwd_mkdb', tmpfile],
                                 stdin=pipe, stdout=pipe, stderr=pipe)
            (stdoutdata, stderrdata) = p.communicate()
            if p.returncode != 0:
                if stderrdata:
                    stderrdata.strip('\n')
                else:
                    stderrdata = '<None>'
                logging.error("pwd_mkdb failed: %s" % stderrdata)
                try:
                    os.unlink(tmpfile)
                except Exception:
                    pass
                raise PasswordError(
                        (500, "Rebuilding the passwd database failed"))
            return
    raise PasswordError((500, "Unknown password file format"))
| apache-2.0 |
andrewls/bzrflag | bzagents/grid-agent.py | 1 | 20370 | #!/usr/bin/python -tt
# An incredibly simple agent. All we do is find the closest enemy tank, drive
# towards it, and shoot. Note that if friendly fire is allowed, you will very
# often kill your own tanks with this code.
#################################################################
# NOTE TO STUDENTS
# This is a starting point for you. You will need to greatly
# modify this code if you want to do anything useful. But this
# should help you to know how to interact with BZRC in order to
# get the information you need.
#
# After starting the bzrflag server, this is one way to start
# this code:
# python agent0.py [hostname] [port]
#
# Often this translates to something like the following (with the
# port name being printed out by the bzrflag server):
# python agent0.py localhost 49857
#################################################################
import sys
import math
import time
import random
import numpy
import grid_filter_gl
from bzrc import BZRC, Command
FLAGRADIUS = 2.5        # attractive field is zero inside this radius of the goal
FLAGSPREAD = 100        # attractive field ramps linearly over this range, then saturates
OBSTACLESPREAD = 20     # repulsive/tangential fields act within this range of an obstacle
SEARCH_GRID_SIZE = 20   # world is partitioned into SEARCH_GRID_SIZE x SEARCH_GRID_SIZE search regions
class Agent(object):
"""Class handles all command and control logic for a teams tanks."""
    def __init__(self, bzrc):
        """Set up server connection, constants, and the occupancy grid.

        Side effects: queries the BZRC server for constants and opens the
        OpenGL grid-visualization window.
        """
        self.bzrc = bzrc
        self.constants = self.bzrc.get_constants()
        # sensor model rates come back as strings; coerce and derive the
        # complementary false-positive/false-negative rates
        self.constants["truepositive"] = float(self.constants["truepositive"])
        self.constants["truenegative"] = float(self.constants["truenegative"])
        self.constants["falsepositive"] = 1 - self.constants["truepositive"]
        self.constants["falsenegative"] = 1 - self.constants["truenegative"]
        self.constants["worldsize"] = int(self.constants["worldsize"])
        self.commands = []
        self.obstacles = []
        self.iterations = 0
        # initialize the global occupancy grid
        # (0.4 prior probability of each cell being occupied)
        self.grid = [[0.4 for i in range(int(self.constants["worldsize"]))] for j in range(int(self.constants["worldsize"]))]
        self.search_grid = [[False for i in range(self.constants["worldsize"]/SEARCH_GRID_SIZE)] for j in range(self.constants["worldsize"]/SEARCH_GRID_SIZE)]
        grid_filter_gl.init_window(int(self.constants["worldsize"]), int(self.constants["worldsize"]))
        print self.constants
def update_occupancy_grid_at_point(self, x, y, probability):
offset = int(self.constants["worldsize"]) / 2
self.grid[x + offset][y + offset] = probability
def get_occupancy_grid_at_point(self, x, y):
offset = int(self.constants["worldsize"]) / 2
return self.grid[x + offset][y + offset]
def get_search_grid_region_center_points(self):
# each region has a center
search_grid_squares = self.constants["worldsize"] / SEARCH_GRID_SIZE
offset = search_grid_squares / 2
center_values = [i * search_grid_squares + offset for i in range(SEARCH_GRID_SIZE)]
points = []
for i in range(SEARCH_GRID_SIZE):
for j in range(SEARCH_GRID_SIZE):
points.append((center_values[i], center_values[j]))
return points
# return [[(center_values[i], center_values[j]) for j in range(SEARCH_GRID_SIZE)] for i in range(SEARCH_GRID_SIZE)]
    def get_search_grid_unsearched_regions(self):
        """Return grid-frame center points of regions still worth exploring.

        A region counts as unsearched until at least 65% of its cells have
        converged (probability below 0.2 or above 0.8).
        """
        search_grid_squares = self.constants["worldsize"] / SEARCH_GRID_SIZE
        center_points = self.get_search_grid_region_center_points()
        grid_to_world_offset = self.constants["worldsize"] / 2
        unsearched_regions = []
        for (x, y) in center_points:
            # loop over all of the region within the grid
            converged_cells = 0
            for i in range(x - search_grid_squares/2, x + search_grid_squares/2):
                for j in range(y - search_grid_squares/2, y + search_grid_squares/2):
                    # accessor expects world coordinates, so shift back
                    probability = self.get_occupancy_grid_at_point(i - grid_to_world_offset, j - grid_to_world_offset)
                    if probability < 0.2 or probability > 0.8:
                        converged_cells += 1
            # mark the region as unsearched if less than 65% of the cells have converged
            if converged_cells < (search_grid_squares * search_grid_squares * 0.65):
                unsearched_regions.append((x, y))
        return unsearched_regions
    def get_nearest_unsearched_region_to_point(self, x, y):
        """Return ((grid_x, grid_y), distance) of the closest unsearched region.

        (x, y) is in world coordinates; the returned point is in grid
        coordinates (world + 400 offset — assumes an 800-unit world, like
        the hard-coded 400s elsewhere in this class). If every region has
        been searched, returns the query point itself with distance 0.
        """
        centers = self.get_search_grid_unsearched_regions()
        if self.iterations == 5:
            print "Getting nearest unsearched region to (%d, %d)" % (x, y)
        if centers:
            distance = float("INF")
            closest = centers[0]
            for center in centers:
                # convert grid-frame center to world frame before measuring
                center = (center[0] - 400, center[1] - 400)
                distance_to_center = math.sqrt((x - center[0])**2 + (y - center[1])**2)
                if distance_to_center < distance:
                    distance = distance_to_center
                    closest = center
                if self.iterations == 5:
                    print "New closest center point is (%d, %d) with distance %f" % (closest[0], closest[1], distance)
            return (closest[0] + 400, closest[1] + 400), distance
        else:
            # if there are no unsearched regions, just let the tanks drive in circles.
            return (x, y), 0
def get_random_unsearched_region(self):
regions = self.get_search_grid_unsearched_regions()
return regions[random.randrange(len(regions))]
def get_random_region(self):
regions = self.get_search_grid_region_center_points()
return regions[random.randrange(len(regions))]
def done_exploring(self):
for i in range(len(self.grid)):
for j in range(len(self.grid[i])):
if self.grid[i][j] > 0.2 and self.grid[i][j] < 0.8:
return False
return True
    def figure_out_where_to_go(self, x, y):
        """Choose the next world-frame target point for a tank at (x, y).

        Mostly heads for the nearest unsearched region, but periodically
        picks a random (or random unsearched) region to avoid getting stuck.
        """
        if self.iterations % 100 == 0: # let it move toward the new point for two full iterations
            print "Selecting using random region."
            target_point = self.get_random_region()
        elif self.iterations % 50 == 0:
            print "Selecting using random unsearched region."
            target_point = self.get_random_unsearched_region()
        else :
            target_point, distance = self.get_nearest_unsearched_region_to_point(x, y)
        # convert grid frame back to world frame (assumes 800-unit world)
        target_point = (target_point[0] - 400, target_point[1] - 400)
        print "Moving toward point (%d, %d)" % target_point
        return target_point
    def tick(self, time_diff):
        """Some time has passed; decide what to do next.

        One agent step: refresh world state, fold each tank's occupancy-grid
        reading into the global grid with a Bayes update, redraw every 5th
        iteration, then (also every 5th iteration) steer each tank toward an
        unexplored region. Exits the process when exploration is complete.
        """
        mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
        self.mytanks = mytanks
        self.othertanks = othertanks
        self.flags = flags
        self.shots = shots
        self.enemies = [tank for tank in othertanks if tank.color !=
                        self.constants['team']]
        self.commands = []
        self.iterations += 1
        print "Iteration %d" % self.iterations
        for tank in mytanks:
            # get the occgrid for that tank
            occgrid = self.bzrc.get_occgrid(tank.index)
            starting_point = occgrid[0]
            occgrid = occgrid[1]
            # so now we have a grid that starts at starting_point and goes up and right. The first point is the starting point, and then each point moves up the world
            for i in range(len(occgrid)):
                for j in range(len(occgrid[i])):
                    observation = occgrid[i][j]
                    # so we have our observation. Let's update the probabilities
                    prior_probability = self.get_occupancy_grid_at_point(starting_point[0] + i, starting_point[1] + j)
                    # p(observation = occupied) = p(observation = occupied | state = occupied) * p(state = occupied) + p(observation = occupied | state = unoccupied) * p(state = unnocupied)
                    # so the observation probability is just the true positive rate times the prior plus the false positive rate times 1 - the prior
                    if observation:
                        observation_probability = self.constants["truepositive"] * prior_probability + self.constants["falsepositive"] * (1 - prior_probability)
                    else:
                        observation_probability = self.constants["truenegative"] * (1 - prior_probability) + self.constants["falsenegative"] * prior_probability
                    # the likelihood just depends on what the observation actually was
                    # if the observation is occupied, we want p(observation = occupied | state = occupied), or the true positive rate
                    # if the observation is unoccupied, we want p(observation = unoccupied | state = occupied), or the false negative rate
                    likelihood = self.constants["truepositive"] if observation else self.constants["falsenegative"]
                    # p(state = occupied | observation = occupied) = p(observation = occupied | state = occupied) * p(state = occupied) / p(observation = occupied)
                    # p(state = occupied | observation = unnoccupied) = p(observation = unoccupied | state = occupied) * p(satet = occupied) / p(observation = unoccupied)
                    new_probability = likelihood * prior_probability / observation_probability
                    # and finally, update the probability at that grid location
                    self.update_occupancy_grid_at_point(starting_point[0] + i, starting_point[1] + j, new_probability)
        if self.iterations % 5 == 0:
            grid_filter_gl.update_grid(numpy.array(self.grid))
            grid_filter_gl.draw_grid()
        if self.done_exploring():
            sys.exit()
        # user potential fields to explore
        self.potential_fields = {}
        def attractive_fields_func(x, y, res):
            # determine how far from point to flags
            # calculate attractive fields to closest flag
            alpha = 10
            closest_flag = None
            distance = float("inf")
            # find closest flag
            self.flags = []
            # found_unexplored_point = False
            # for i in range(len(self.grid)):
            # for j in range(len(self.grid[i])):
            # if self.grid[i][j] > 0.2 and self.grid[i][j] < 0.8:
            # found_unexplored_point = True
            # closest_flag = (i - 400, j - 400)
            # distance = math.sqrt((i - 400 - x)**2 + (j - 400 - y)**2)
            # print "Moving to point (%d, %d)" % closest_flag
            # break
            # if found_unexplored_point:
            # break
            # for i in range(len(self.grid)):
            # self.flags = self.flags + [(i - 400 + (i * 20), j - 400 + (j * 20)) for j in range(len(self.grid[i])) if self.grid[i][j] > 0.2 and self.grid[i][j] < 0.8]
            # found_unexplored_point = False
            # for flag in self.flags:
            # distance_to_flag = math.sqrt((flag[0] - x)**2 + (flag[1] - y)**2)
            # if distance_to_flag < distance:
            # distance = distance_to_flag
            # closest_flag = flag
            # NOTE: "flag" here is really the chosen exploration target,
            # selected with the same schedule as figure_out_where_to_go()
            if self.iterations % 100 == 0: # let it move toward the new point for two full iterations
                print "Selecting using random region."
                closest_flag = self.get_random_region()
                distance = math.sqrt((x - closest_flag[0])**2 + (y - closest_flag[1])**2)
            elif self.iterations % 50 == 0:
                print "Selecting using random unsearched region."
                closest_flag = self.get_random_unsearched_region()
                distance = math.sqrt((x - closest_flag[0])**2 + (y - closest_flag[1])**2)
            else :
                closest_flag, distance = self.get_nearest_unsearched_region_to_point(x, y)
                closest_flag = (closest_flag[0] - 400, closest_flag[1] - 400)
            print "Moving toward point (%d, %d)" % closest_flag
            # calculate angle between closest_flag and tank
            angle = math.atan2(closest_flag[1] - y, closest_flag[0] - x)
            # calculate dx and dy based off of distance and angle
            if distance < FLAGRADIUS:
                return 0,0
            elif distance < FLAGRADIUS + FLAGSPREAD:
                return alpha * (distance - FLAGRADIUS) * math.cos(angle), alpha * (distance - FLAGRADIUS) * math.sin(angle)
            else:
                return alpha * FLAGSPREAD * math.cos(angle), alpha * FLAGSPREAD * math.sin(angle)
        def repulsive_fields_func(x, y, res):
            alpha = 10
            INFINITY = 1000000
            potential_fields = []
            # every grid cell believed occupied (> 0.8) repels the tank
            for i in range(len(self.grid)): #row
                for j in range(len(self.grid[i])): #point
                    if self.grid[i][j] > .8:
                        xavg = i - 400
                        yavg = j - 400
                        # for corner in obstacle:
                        # xavg += corner[0]
                        # yavg += corner[1]
                        # xavg = xavg / len(obstacle)
                        # yavg = yavg / len(obstacle)
                        radius = 10 #todo frob this later
                        distance = math.sqrt((xavg - x)**2 + (yavg - y)**2) #tank distance from center
                        angle = math.atan2(yavg - y, xavg - x)
                        if distance < radius + OBSTACLESPREAD:
                            potential_fields.append((-alpha * (OBSTACLESPREAD + radius - distance) * math.cos(angle), -alpha * (OBSTACLESPREAD + radius - distance) * math.sin(angle)))
            # merge potential fields
            return self.merge_potential_fields(potential_fields)
        def tangential_fields_func(x, y, res):
            alpha = 1
            potential_fields = []
            # only runs of 4+ occupied cells (horizontal or vertical) get a
            # tangential field; the 795 bound keeps the i+3/j+3 lookahead
            # inside an 800-cell grid
            for i in range(len(self.grid)):
                for j in range(len(self.grid[i])):
                    if i > 795 or j > 795:
                        continue
                    if (self.grid[i][j] > 0.8 and self.grid[i+1][j] > 0.8 and self.grid[i+2][j] > 0.8 and self.grid[i+3][j] > 0.8) or (self.grid[i][j] > 0.8 and self.grid[i][j+1] > 0.8 and self.grid[i][j+2] > 0.8 and self.grid[i][j+3] > 0.8):
                        xavg = i - 400
                        yavg = j - 400
                        radius = 10 # magic number - frob
                        distance = math.sqrt((xavg - x)**2 + (yavg - y)**2) #tank distance from center
                        angle = math.atan2(yavg - y, xavg - x) + math.pi/2
                        if distance < radius + OBSTACLESPREAD:
                            potential_fields.append((-alpha * (OBSTACLESPREAD + radius - distance) * math.cos(angle), -alpha * (OBSTACLESPREAD + radius - distance) * math.sin(angle)))
            # for obstacle in self.obstacles:
            # xavg = 0
            # yavg = 0
            # for corner in obstacle:
            # xavg += corner[0]
            # yavg += corner[1]
            # xavg = xavg / len(obstacle)
            # yavg = yavg / len(obstacle)
            # radius = math.sqrt((obstacle[0][0] - xavg)**2 + (obstacle[0][1] - yavg)**2)
            # distance = math.sqrt((xavg - x)**2 + (yavg - y)**2) #tank distance from center
            # angle = math.atan2(yavg - y, xavg - x) + math.pi/2
            # if distance < radius + OBSTACLESPREAD:
            # potential_fields.append((-alpha * (OBSTACLESPREAD + radius - distance) * math.cos(angle), -alpha * (OBSTACLESPREAD + radius - distance) * math.sin(angle)))
            return self.merge_potential_fields(potential_fields)
        def super_tab(x, y, res):
            # combined field: attractive + repulsive + tangential
            potential_fields = [attractive_fields_func(x, y, res)]
            potential_fields = potential_fields + [repulsive_fields_func(x, y, res)]
            potential_fields = potential_fields + [tangential_fields_func(x, y, res)]
            merged = self.merge_potential_fields(potential_fields)
            return merged[0], merged[1]
        # expose the closures so calculate_*_fields() can delegate to them
        self.attractive_fields_func = attractive_fields_func
        self.repulsive_fields_func = repulsive_fields_func
        self.tangential_fields_func = tangential_fields_func
        self.super_tab = super_tab
        if self.iterations % 5 == 0 and (self.iterations % 50 != 5 and self.iterations % 50 != 10):
            for tank in mytanks:
                # self.potential_fields[tank.index] = self.calculate_attractive_fields(tank)
                # potential_fields = self.calculate_attractive_fields(tank)
                target_point = self.figure_out_where_to_go(tank.x, tank.y)
                self.move_to_position(tank, target_point[0], target_point[1])
                # self.potential_fields[tank.index] = self.potential_fields[tank.index] + self.calculate_repulsive_fields(tank, self.obstacles, mytanks + othertanks)
                # self.potential_fields[tank.index] = self.potential_fields[tank.index] + self.calculate_tangential_fields(tank)
        # actually move the tanks
        # for key in self.potential_fields.keys():
        # reduce potential fields to one
        # move in direction based off of dx and dy
        # self.potential_fields[key] = self.merge_potential_fields(self.potential_fields[key])
        # for tank in mytanks:
        # self.move_to_position(tank, tank.x + self.potential_fields[tank.index][0], tank.y + self.potential_fields[tank.index][1])
        results = self.bzrc.do_commands(self.commands)
    def calculate_attractive_fields(self, tank):
        """Attractive field at the tank's position, as a one-element [(dx, dy)].

        Delegates to the closure installed by tick(); the resolution
        argument (20) is accepted but unused by the closure.
        """
        dx, dy = self.attractive_fields_func(tank.x, tank.y, 20)
        return [(dx, dy)]
    def calculate_repulsive_fields(self, tank, obstacles, tanks):
        """Repulsive field at the tank's position; obstacles/tanks args unused."""
        dx, dy = self.repulsive_fields_func(tank.x, tank.y, 20)
        return [(dx, dy)]
    def calculate_tangential_fields(self, tank):
        """Tangential field at the tank's position, as a one-element [(dx, dy)]."""
        dx, dy = self.tangential_fields_func(tank.x, tank.y, 20)
        return [(dx, dy)]
def attack_enemies(self, tank):
"""Find the closest enemy and chase it, shooting as you go."""
best_enemy = None
best_dist = 2 * float(self.constants['worldsize'])
for enemy in self.enemies:
if enemy.status != 'alive':
continue
dist = math.sqrt((enemy.x - tank.x)**2 + (enemy.y - tank.y)**2)
if dist < best_dist:
best_dist = dist
best_enemy = enemy
if best_enemy is None:
command = Command(tank.index, 0, 0, False)
self.commands.append(command)
else:
self.move_to_position(tank, best_enemy.x, best_enemy.y)
def move_to_position(self, tank, target_x, target_y):
"""Set command to move to given coordinates."""
target_angle = math.atan2(target_y - tank.y,
target_x - tank.x)
relative_angle = self.normalize_angle(target_angle - tank.angle)
command = Command(tank.index, 1, 2 * relative_angle, True)
self.commands.append(command)
def normalize_angle(self, angle):
"""Make any angle be between +/- pi."""
angle -= 2 * math.pi * int (angle / (2 * math.pi))
if angle <= -math.pi:
angle += 2 * math.pi
elif angle > math.pi:
angle -= 2 * math.pi
return angle
def merge_potential_fields(self, fields):
dx = 0
dy = 0
for field in fields:
dx += field[0]
dy += field[1]
return (dx, dy)
def main():
    """Connect to the bzrflag server given on the command line and run the agent."""
    # Process CLI arguments.
    try:
        execname, host, port = sys.argv
    except ValueError:
        execname = sys.argv[0]
        print >>sys.stderr, '%s: incorrect number of arguments' % execname
        print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
        sys.exit(-1)
    # Connect.
    #bzrc = BZRC(host, int(port), debug=True)
    bzrc = BZRC(host, int(port))
    agent = Agent(bzrc)
    prev_time = time.time()
    # Run the agent
    try:
        # loop forever; each tick gets the wall-clock time since start
        while True:
            time_diff = time.time() - prev_time
            agent.tick(time_diff)
    except KeyboardInterrupt:
        print "Exiting due to keyboard interrupt."
        bzrc.close()
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
| gpl-3.0 |
zielmicha/torrent-workshops | bencode.py | 1 | 2431 | import ioutil
import string
import io
EndSenitel = object()
class Decoder():
    """Pull-parser that decodes bencoded values from a binary *stream*."""

    def __init__(self, stream):
        self.stream = stream

    def decode(self):
        """Decode and return the next bencoded value from the stream.

        Returns EndSenitel when the list/dict terminator b'e' is read.
        Raises BencodingError on malformed input and EOFError on a
        truncated stream.
        """
        c = self.stream.read(1)
        if not c:
            # BUG FIX: an empty read previously fell into the string-size
            # branch, because b'' is "in" any bytes object.
            raise EOFError()
        if c == b'i':
            data = ioutil.read_until(self.stream, b'e')
            # reject leading zeros ("i03e") and negative zero ("i-0e")
            if data != b'0' and data.startswith(b'0'):
                raise BencodingError()
            if data == b'-0':
                raise BencodingError()
            return int(data)
        elif c == b'l':
            return self.read_list()
        elif c in string.digits.encode():
            size_data = ioutil.read_until(self.stream, b':')
            size = int(c + size_data)
            data = self.stream.read(size)
            if len(data) != size:
                raise EOFError()
            return data
        elif c == b'd':
            l = self.read_list()
            return dict(zip(l[::2], l[1::2]))
        elif c == b'e':
            return EndSenitel
        else:
            # BUG FIX: unknown bytes previously made decode() return None
            # silently, corrupting enclosing lists/dicts.
            raise BencodingError('unexpected byte %r' % c)

    def read_list(self):
        """Decode values until the terminator, returning them as a list."""
        ret = []
        while True:
            obj = self.decode()
            # identity check: the sentinel is a unique object()
            if obj is EndSenitel:
                break
            ret.append(obj)
        return ret
class Encoder():
    """Serializer that writes Python values to *stream* in bencoded form."""

    def __init__(self, stream):
        self.stream = stream

    def encode(self, obj):
        """Write the bencoded representation of *obj* to the stream.

        Supports int, bytes, dict (keys sorted), and list. Raises
        BencodingError for str (callers must pass bytes) and for any
        other type.
        """
        emit = self.stream.write
        if isinstance(obj, int):
            emit(b'i')
            emit(str(obj).encode())
            emit(b'e')
        elif isinstance(obj, bytes):
            emit(str(len(obj)).encode())
            emit(b':')
            emit(obj)
        elif isinstance(obj, dict):
            emit(b'd')
            # bencoding requires dictionary keys in sorted order
            for key, value in sorted(obj.items()):
                self.encode(key)
                self.encode(value)
            emit(b'e')
        elif isinstance(obj, list):
            emit(b'l')
            for item in obj:
                self.encode(item)
            emit(b'e')
        elif isinstance(obj, str):
            raise BencodingError('str passed to encode - use bytes instead')
        else:
            raise BencodingError('don\'t know how to encode %s' % type(obj))
class BencodingError(Exception):
    """Raised for malformed bencoded input or values that cannot be encoded."""
    pass
def decode(s):
    """Decode a single bencoded value from the bytes *s*."""
    stream = io.BytesIO(s)
    return Decoder(stream).decode()
def encode(v):
    """Return *v* serialized as bencoded bytes."""
    buf = io.BytesIO()
    Encoder(buf).encode(v)
    return buf.getvalue()
if __name__ == '__main__':
    import sys
    import pprint
    # filter mode: decode one bencoded value from stdin and pretty-print it
    v = Decoder(sys.stdin.buffer).decode()
    pprint.pprint(v)
| gpl-3.0 |
scorphus/scrapy | scrapy/commands/shell.py | 107 | 2194 | """
Scrapy Shell
See documentation in docs/topics/shell.rst
"""
from threading import Thread
from scrapy.commands import ScrapyCommand
from scrapy.shell import Shell
from scrapy.http import Request
from scrapy.utils.spider import spidercls_for_request, DefaultSpider
class Command(ScrapyCommand):
    """'scrapy shell' command: an interactive scraping console."""

    # the shell can run outside a project, against an arbitrary URL
    requires_project = False
    # KEEP_ALIVE keeps the crawler engine running between shell commands
    default_settings = {'KEEP_ALIVE': True, 'LOGSTATS_INTERVAL': 0}

    def syntax(self):
        return "[url|file]"

    def short_desc(self):
        return "Interactive scraping console"

    def long_desc(self):
        return "Interactive console for scraping the given url"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-c", dest="code",
            help="evaluate the code in the shell, print the result and exit")
        parser.add_option("--spider", dest="spider",
            help="use this spider")

    def update_vars(self, vars):
        """You can use this function to update the Scrapy objects that will be
        available in the shell
        """
        pass

    def run(self, args, opts):
        """Resolve the spider for the given URL, start the engine, open the shell."""
        url = args[0] if args else None
        spider_loader = self.crawler_process.spider_loader

        spidercls = DefaultSpider
        if opts.spider:
            spidercls = spider_loader.load(opts.spider)
        elif url:
            # pick the spider whose allowed domains match the request URL
            spidercls = spidercls_for_request(spider_loader, Request(url),
                                              spidercls, log_multiple=True)

        # The crawler is created this way since the Shell manually handles the
        # crawling engine, so the set up in the crawl method won't work
        crawler = self.crawler_process._create_crawler(spidercls)
        # The Shell class needs a persistent engine in the crawler
        crawler.engine = crawler._create_engine()
        crawler.engine.start()

        self._start_crawler_thread()

        shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)
        shell.start(url=url)

    def _start_crawler_thread(self):
        # daemon thread so the process can exit when the shell is closed
        t = Thread(target=self.crawler_process.start,
                   kwargs={'stop_after_crawl': False})
        t.daemon = True
        t.start()
| bsd-3-clause |
benoitsteiner/tensorflow | tensorflow/python/kernel_tests/softplus_op_test.py | 82 | 4807 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):
  """Numerical and gradient tests for nn_ops.softplus."""

  def _npSoftplus(self, np_features):
    """NumPy reference: softplus(x) = log(1 + exp(x)), computed stably."""
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    return np.logaddexp(zero, np_features)

  def _testSoftplus(self, np_features, use_gpu=False):
    """Compare the op's output against the NumPy reference."""
    np_softplus = self._npSoftplus(np_features)
    with self.test_session(use_gpu=use_gpu):
      softplus = nn_ops.softplus(np_features)
      tf_softplus = softplus.eval()
      self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
      # softplus is strictly positive for all finite inputs
      self.assertTrue(np.all(tf_softplus > 0))
      self.assertShapeEqual(np_softplus, softplus)

  def testNumbers(self):
    for t in [np.float16, np.float32, np.float64]:
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=False)
      self._testSoftplus(
          np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
          use_gpu=True)
      # probe values around the dtype's epsilon boundary, where the op
      # switches between its linear and exponential approximations
      log_eps = np.log(np.finfo(t).eps)
      one = t(1)
      ten = t(10)
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=False)
      # BUG FIX: this list previously read "log_eps + ten - log_eps," --
      # a missing comma merged two entries and dropped -log_eps, so the
      # GPU run exercised different inputs than the CPU run above.
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=True)

  def testGradient(self):
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    print("softplus (float) gradient err = ", err)
    self.assertLess(err, 1e-4)

  def testGradGrad(self):
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      (grad,) = gradients_impl.gradients(y, x)
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], grad, [2, 5], x_init_value=x_init)
    print("softplus (float) gradient of gradient err = ", err)
    self.assertLess(err, 5e-5)

  def testGradGradGrad(self):
    with self.test_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      (grad,) = gradients_impl.gradients(y, x)
      (grad_grad,) = gradients_impl.gradients(grad, x)
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], grad_grad, [2, 5], x_init_value=x_init)
    print("softplus (float) third-order gradient err = ", err)
    self.assertLess(err, 5e-5)

  def testWarnInts(self):
    # Running the op triggers address sanitizer errors, so we just make
    # the op without evaluating it.
    nn_ops.softplus(constant_op.constant(7))
if __name__ == "__main__":
test.main()
| apache-2.0 |
pomack/pychecker | pychecker/checker.py | 1 | 13095 | #!/usr/bin/env python
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Copyright (c) 2001-2004, MetaSlash Inc. All rights reserved.
# Portions Copyright (c) 2005, Google, Inc. All rights reserved.
"""
Check python source code files for possible errors and print warnings
Contact Info:
http://pychecker.sourceforge.net/
pychecker-list@lists.sourceforge.net
"""
import string
import types
import sys
import imp
import os
import glob
# see __init__.py for meaning, this must match the version there
LOCAL_MAIN_VERSION = 3
def setupNamespace(path) :
    """Adjust sys.path so the pychecker package imports correctly.

    *path* is sys.argv[0], the path this script was invoked as.
    """
    # remove pychecker if it's the first component, it needs to be last
    if sys.path[0][-9:] == 'pychecker' :
        del sys.path[0]
    # make sure pychecker is last in path, so we can import
    checker_path = os.path.dirname(os.path.dirname(path))
    if checker_path not in sys.path :
        sys.path.append(checker_path)
def setupSysPathForDevelopment():
    """Prefer a local development checkout of pychecker over site-packages.

    If the imported pychecker package lives somewhere other than this
    script's directory, reorder sys.path and drop the cached module so a
    re-import picks up the local copy.
    """
    import pychecker
    this_module = sys.modules[__name__]
    # in 2.2 and older, this_module might not have __file__ at all
    if not hasattr(this_module, '__file__'):
        return
    this_path = os.path.normpath(os.path.dirname(this_module.__file__))
    pkg_path = os.path.normpath(os.path.dirname(pychecker.__file__))
    if pkg_path != this_path:
        # pychecker was probably found in site-packages, insert this
        # directory before the other one so we can do development and run
        # our local version and not the version from site-packages.
        pkg_dir = os.path.dirname(pkg_path)
        i = 0
        for p in sys.path:
            if os.path.normpath(p) == pkg_dir:
                sys.path.insert(i-1, os.path.dirname(this_path))
                break
            i = i + 1
        del sys.modules['pychecker']
if __name__ == '__main__' :
setupNamespace(sys.argv[0])
setupSysPathForDevelopment()
from pychecker import utils
from pychecker import printer
from pychecker import warn
from pychecker import OP
from pychecker import Config
from pychecker import function
from pychecker import msgs
from pychecker import pcmodules
from pychecker.Warning import Warning
_cfg = None
_VERSION_MISMATCH_ERROR = '''
There seem to be two versions of PyChecker being used.
One is probably in python/site-packages, the other in a local directory.
If you want to run the local version, you must remove the version
from site-packages. Or you can install the current version
by doing python setup.py install.
'''
def cfg() :
    """Return the currently active Config object (delegates to pychecker.utils)."""
    return utils.cfg()
def _flattenList(list) :
"Returns a list which contains no lists"
new_list = []
for element in list :
if type(element) == types.ListType :
new_list.extend(_flattenList(element))
else :
new_list.append(element)
return new_list
def getModules(arg_list) :
    """
    arg_list is a list of arguments to pychecker; arguments can represent
    a module name, a filename, or a wildcard file specification.

    Returns a list of (module name, dirPath) that can be imported, where
    dirPath is the on-disk path to the module name for that argument.

    dirPath can be None (in case the given argument is an actual module).
    """
    new_arguments = []
    for arg in arg_list :
        # is this a wildcard filespec? (necessary for windows)
        if '*' in arg or '?' in arg or '[' in arg :
            arg = glob.glob(arg)
        new_arguments.append(arg)
    PY_SUFFIXES = ['.py']
    PY_SUFFIX_LENS = [3]
    if _cfg.quixote:
        # Quixote projects also use .ptl template files
        PY_SUFFIXES.append('.ptl')
        PY_SUFFIX_LENS.append(4)
    modules = []
    for arg in _flattenList(new_arguments) :
        # if arg is an actual module, return None for the directory
        arg_dir = None
        # is it a .py file?
        for suf, suflen in zip(PY_SUFFIXES, PY_SUFFIX_LENS):
            if len(arg) > suflen and arg[-suflen:] == suf:
                arg_dir = os.path.dirname(arg)
                if arg_dir and not os.path.exists(arg) :
                    print 'File or pathname element does not exist: "%s"' % arg
                    continue
                # strip directory and suffix to get the importable name
                module_name = os.path.basename(arg)[:-suflen]
                arg = module_name
        modules.append((arg, arg_dir))
    return modules
def getAllModules():
    """
    Returns a list of all modules that should be checked.

    @rtype: list of L{pcmodules.PyCheckerModule}
    """
    return [module for module in pcmodules.getPCModules() if module.check]
_BUILTIN_MODULE_ATTRS = { 'sys': [ 'ps1', 'ps2', 'tracebacklimit',
'exc_type', 'exc_value', 'exc_traceback',
'last_type', 'last_value', 'last_traceback',
],
}
def fixupBuiltinModules(needs_init=0):
    """Populate attribute lists for builtin (C) modules.

    If *needs_init* is true, a PyCheckerModule is first registered for
    each builtin module name.
    """
    for moduleName in sys.builtin_module_names :
        # Skip sys since it will reset sys.stdout in IDLE and cause
        # stdout to go to the real console rather than the IDLE console.
        # FIXME: this breaks test42
        # if moduleName == 'sys':
        #     continue
        if needs_init:
            _ = pcmodules.PyCheckerModule(moduleName, 0)
        # builtin modules don't have a moduleDir
        module = pcmodules.getPCModule(moduleName)
        if module is not None :
            try :
                m = imp.init_builtin(moduleName)
            except ImportError :
                pass
            else :
                # some modules expose attributes only conditionally;
                # merge in the hand-maintained extras
                extra_attrs = _BUILTIN_MODULE_ATTRS.get(moduleName, [])
                module.attributes = [ '__dict__' ] + dir(m) + extra_attrs
def _printWarnings(warnings, stream=None):
    """Sort and print *warnings* to *stream* (default sys.stdout).

    Consecutive duplicate warnings are suppressed; a blank line separates
    warnings from different files.
    """
    if stream is None:
        stream = sys.stdout
    warnings.sort()
    lastWarning = None
    for warning in warnings :
        if lastWarning is not None:
            # ignore duplicate warnings
            if cmp(lastWarning, warning) == 0:
                continue
            # print blank line between files
            if lastWarning.file != warning.file:
                stream.write("\n")
        lastWarning = warning
        warning.output(stream, removeSysPath=True)
class NullModule:
    """Placeholder for a module that failed to import; any attribute is None."""
    def __getattr__(self, unused_attr):
        return None
def install_ignore__import__():
    """Replace the builtin __import__ with one that tolerates failures.

    Failed imports yield a NullModule instead of raising, so checking can
    continue past missing dependencies (with a notice unless quiet).
    """
    _orig__import__ = None
    def __import__(name, globals=None, locals=None, fromlist=None):
        if globals is None:
            globals = {}
        if locals is None:
            locals = {}
        if fromlist is None:
            fromlist = ()
        try:
            pymodule = _orig__import__(name, globals, locals, fromlist)
        except ImportError:
            pymodule = NullModule()
            if not _cfg.quiet:
                modname = '.'.join((name,) + fromlist)
                sys.stderr.write("Can't import module: %s, ignoring.\n" % modname)
        return pymodule
    # keep the orig __import__ around so we can call it
    import __builtin__
    _orig__import__ = __builtin__.__import__
    __builtin__.__import__ = __import__
def processFiles(files, cfg=None, pre_process_cb=None):
    """
    Load each file as a PyCheckerModule, returning import-failure warnings.

    @type  files: list of str
    @type  cfg: L{Config.Config}
    @param pre_process_cb: callable notifying of module name, filename
    @type  pre_process_cb: callable taking (str, str)
    """
    warnings = []
    # insert this here, so we find files in the local dir before std library
    if sys.path[0] != '' :
        sys.path.insert(0, '')
    # ensure we have a config object, it's necessary
    global _cfg
    if cfg is not None:
        _cfg = cfg
    elif _cfg is None:
        _cfg = Config.Config()
    if _cfg.ignoreImportErrors:
        install_ignore__import__()
    utils.initConfig(_cfg)
    utils.debug('Processing %d files' % len(files))
    for file, (moduleName, moduleDir) in zip(files, getModules(files)):
        if callable(pre_process_cb):
            pre_process_cb("module %s (%s)" % (moduleName, file))
        # create and load the PyCheckerModule, tricking sys.path temporarily
        oldsyspath = sys.path[:]
        sys.path.insert(0, moduleDir)
        pcmodule = pcmodules.PyCheckerModule(moduleName, moduleDir=moduleDir)
        loaded = pcmodule.load()
        sys.path = oldsyspath
        if not loaded:
            w = Warning(pcmodule.filename(), 1,
                        msgs.Internal("NOT PROCESSED UNABLE TO IMPORT"))
            warnings.append(w)
    utils.debug('Processed %d files' % len(files))
    # pop the config pushed by utils.initConfig above
    utils.popConfig()
    return warnings
# only used by TKInter options.py
# only used by TKInter options.py
def getWarnings(files, cfg = None, suppressions = None):
    """Process *files* and return import warnings plus all check warnings."""
    warnings = processFiles(files, cfg)
    fixupBuiltinModules()
    return warnings + warn.find(getAllModules(), _cfg, suppressions)
def _print_processing(name) :
    """Progress callback: announce the module being processed (unless quiet)."""
    if not _cfg.quiet :
        sys.stderr.write("Processing %s...\n" % name)
def main(argv) :
    """Command-line entry point; returns 1 if warnings were found, else 0."""
    __pychecker__ = 'no-miximport'
    import pychecker
    # refuse to run with mismatched installed/local pychecker versions
    if LOCAL_MAIN_VERSION != pychecker.MAIN_MODULE_VERSION :
        sys.stderr.write(_VERSION_MISMATCH_ERROR)
        sys.exit(100)
    # remove empty arguments
    argv = filter(None, argv)
    # if the first arg starts with an @, read options from the file
    # after the @ (this is mostly for windows)
    if len(argv) >= 2 and argv[1][0] == '@':
        # read data from the file
        command_file = argv[1][1:]
        try:
            f = open(command_file, 'r')
            command_line = f.read()
            f.close()
        except IOError, err:
            sys.stderr.write("Unable to read commands from file: %s\n  %s\n" % \
                             (command_file, err))
            sys.exit(101)
        # convert to an argv list, keeping argv[0] and the files to process
        argv = argv[:1] + string.split(command_line) + argv[2:]
    global _cfg
    _cfg, files, suppressions = Config.setupFromArgs(argv[1:])
    utils.initConfig(_cfg)
    if not files :
        return 0
    # Now that we've got the args, update the list of evil C objects
    for evil_doer in _cfg.evil:
        pcmodules.EVIL_C_OBJECTS[evil_doer] = None
    # insert this here, so we find files in the local dir before std library
    sys.path.insert(0, '')
    utils.debug('main: Finding import warnings')
    importWarnings = processFiles(files, _cfg, _print_processing)
    utils.debug('main: Found %d import warnings' % len(importWarnings))
    fixupBuiltinModules()
    if _cfg.printParse :
        for module in getAllModules() :
            printer.module(module)
    utils.debug('main: Finding warnings')
    # suppressions is a tuple of suppressions, suppressionRegexs dicts
    warnings = warn.find(getAllModules(), _cfg, suppressions)
    utils.debug('main: Found %d warnings' % len(warnings))
    if not _cfg.quiet :
        print "\nWarnings...\n"
    if warnings or importWarnings :
        _printWarnings(importWarnings + warnings)
        return 1
    if not _cfg.quiet :
        print "None"
    return 0
# FIXME: this is a nasty side effect for import checker
# Module tail: when run as a script, dispatch to main(); when *imported*
# (the else branch), install a replacement __builtin__.__import__ hook so
# every subsequent import in the host program is checked on the fly.
if __name__ == '__main__' :
try :
sys.exit(main(sys.argv))
except Config.UsageError :
sys.exit(127)
else :
# State for the import-hook mode, initialised by _init() below.
_orig__import__ = None
_suppressions = None
_warnings_cache = {}
# Drop warnings already emitted in a previous import so each unique
# formatted warning is printed at most once per process.
def _get_unique_warnings(warnings):
for i in range(len(warnings)-1, -1, -1):
w = warnings[i].format()
if _warnings_cache.has_key(w):
del warnings[i]
else:
_warnings_cache[w] = 1
return warnings
# Deliberately shadows the builtin __import__: imports the module via the
# saved original hook, then runs pychecker over it if it is new and not
# part of pychecker itself.
def __import__(name, globals=None, locals=None, fromlist=None):
if globals is None:
globals = {}
if locals is None:
locals = {}
if fromlist is None:
fromlist = []
# Only check modules not already imported, and never pychecker's own.
check = not sys.modules.has_key(name) and name[:10] != 'pychecker.'
pymodule = _orig__import__(name, globals, locals, fromlist)
if check :
try :
# FIXME: can we find a good moduleDir ?
module = pcmodules.PyCheckerModule(pymodule.__name__)
if module.initModule(pymodule):
warnings = warn.find([module], _cfg, _suppressions)
_printWarnings(_get_unique_warnings(warnings))
else :
print 'Unable to load module', pymodule.__name__
# Checking must never break the host program's import.
except Exception:
name = getattr(pymodule, '__name__', utils.safestr(pymodule))
# FIXME: can we use it here ?
utils.importError(name)
return pymodule
# Configure from the PYCHECKER environment variable and swap the import
# hook into __builtin__ (Python 2 name of the builtins module).
def _init() :
global _cfg, _suppressions, _orig__import__
args = string.split(os.environ.get('PYCHECKER', ''))
_cfg, files, _suppressions = Config.setupFromArgs(args)
utils.initConfig(_cfg)
fixupBuiltinModules(1)
# keep the orig __import__ around so we can call it
import __builtin__
_orig__import__ = __builtin__.__import__
__builtin__.__import__ = __import__
# PYCHECKER_DISABLED gives embedders an escape hatch.
if not os.environ.get('PYCHECKER_DISABLED') :
_init()
| bsd-3-clause |
citrix-openstack-build/cinder | cinder/tests/test_HpSanISCSIDriver.py | 1 | 9385 | # Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.san.hp_lefthand import HpSanISCSIDriver
LOG = logging.getLogger(__name__)
# Unit tests for the HP LeftHand (HpSanISCSIDriver) Cinder volume driver.
# The driver's CLIQ command runner and iSCSI property lookup are stubbed so
# every test runs against canned XML responses instead of real hardware.
class HpSanISCSITestCase(test.TestCase):
def setUp(self):
super(HpSanISCSITestCase, self).setUp()
# Replace the two methods that would talk to the SAN appliance.
self.stubs.Set(HpSanISCSIDriver, "_cliq_run",
self._fake_cliq_run)
self.stubs.Set(HpSanISCSIDriver, "_get_iscsi_properties",
self._fake_get_iscsi_properties)
self.driver = HpSanISCSIDriver()
self.volume_name = "fakevolume"
self.connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'host': 'fakehost'}
# NOTE(review): 'target_discoverd' looks like a typo for
# 'target_discovered' -- confirm against the driver's expected keys.
self.properties = {
'target_discoverd': True,
'target_portal': '10.0.1.6:3260',
'target_iqn':
'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
'volume_id': 1}
def tearDown(self):
super(HpSanISCSITestCase, self).tearDown()
def _fake_get_iscsi_properties(self, volume):
return self.properties
# Dispatcher stub: mimics one CLIQ invocation per verb, validating the
# arguments the driver passed and returning a canned (stdout, stderr) pair.
def _fake_cliq_run(self, verb, cliq_args):
"""Return fake results for the various methods."""
def create_volume(cliq_args):
"""
input = "createVolume description="fake description"
clusterName=Cluster01 volumeName=fakevolume
thinProvision=0 output=XML size=1GB"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['thinProvision'], '1')
self.assertEqual(cliq_args['size'], '1GB')
return output, None
def delete_volume(cliq_args):
"""
input = "deleteVolume volumeName=fakevolume prompt=false
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="164" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['prompt'], 'false')
return output, None
def assign_volume(cliq_args):
"""
input = "assignVolumeToServer volumeName=fakevolume
serverName=fakehost
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="174" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['serverName'], self.connector['host'])
return output, None
def unassign_volume(cliq_args):
"""
input = "unassignVolumeToServer volumeName=fakevolume
serverName=fakehost output=XML
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="205" result="0"/>
</gauche>"""
self.assertEqual(cliq_args['volumeName'], self.volume_name)
self.assertEqual(cliq_args['serverName'], self.connector['host'])
return output, None
def get_cluster_info(cliq_args):
"""
input = "getClusterInfo clusterName=Cluster01 searchDepth=1
verbose=0 output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded." name="CliqSuccess"
processingTime="1164" result="0">
<cluster blockSize="1024" description=""
maxVolumeSizeReplication1="622957690"
maxVolumeSizeReplication2="311480287"
minVolumeSize="262144" name="Cluster01"
pageSize="262144" spaceTotal="633697992"
storageNodeCount="2" unprovisionedSpace="622960574"
useVip="true">
<nsm ipAddress="10.0.1.7" name="111-vsa"/>
<nsm ipAddress="10.0.1.8" name="112-vsa"/>
<vip ipAddress="10.0.1.6" subnetMask="255.255.255.0"/>
</cluster></response></gauche>"""
return output, None
def get_volume_info(cliq_args):
"""
input = "getVolumeInfo volumeName=fakevolume output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded." name="CliqSuccess"
processingTime="87" result="0">
<volume autogrowPages="4" availability="online"
blockSize="1024" bytesWritten="0" checkSum="false"
clusterName="Cluster01" created="2011-02-08T19:56:53Z"
deleting="false" description="" groupName="Group01"
initialQuota="536870912" isPrimary="true"
iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:fakev"
maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
minReplication="1" name="vol-b" parity="0" replication="2"
reserveQuota="536870912" scratchQuota="4194304"
serialNumber="9fa5c8b2cca54b2948a63d8"
size="1073741824" stridePages="32" thinProvision="true">
<status description="OK" value="2"/>
<permission access="rw" authGroup="api-1"
chapName="chapusername" chapRequired="true"
id="25369" initiatorSecret="" iqn=""
iscsiEnabled="true" loadBalance="true"
targetSecret="supersecret"/>
</volume></response></gauche>"""
return output, None
def test_error(cliq_args):
output = """<gauche version="1.0">
<response description="Volume '134234' not found."
name="CliqVolumeNotFound" processingTime="1083"
result="8000100c"/>
</gauche>"""
return output, None
self.assertEqual(cliq_args['output'], 'XML')
# NOTE(review): this try/except is inert -- a dict literal cannot raise
# KeyError; the lookup that can (verbs[verb]) happens outside the try.
try:
verbs = {'createVolume': create_volume,
'deleteVolume': delete_volume,
'assignVolumeToServer': assign_volume,
'unassignVolumeToServer': unassign_volume,
'getClusterInfo': get_cluster_info,
'getVolumeInfo': get_volume_info,
'testError': test_error}
except KeyError:
raise NotImplementedError()
return verbs[verb](cliq_args)
def test_create_volume(self):
volume = {'name': self.volume_name, 'size': 1}
model_update = self.driver.create_volume(volume)
expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
self.assertEqual(model_update['provider_location'], expected_location)
def test_delete_volume(self):
volume = {'name': self.volume_name}
self.driver.delete_volume(volume)
def test_initialize_connection(self):
volume = {'name': self.volume_name}
result = self.driver.initialize_connection(volume, self.connector)
self.assertEqual(result['driver_volume_type'], 'iscsi')
self.assertDictMatch(result['data'], self.properties)
def test_terminate_connection(self):
volume = {'name': self.volume_name}
self.driver.terminate_connection(volume, self.connector)
# The snapshot operations are expected to be unimplemented in this driver;
# the tests simply swallow the NotImplementedError.
def test_create_snapshot(self):
try:
self.driver.create_snapshot("")
except NotImplementedError:
pass
def test_create_volume_from_snapshot(self):
try:
self.driver.create_volume_from_snapshot("", "")
except NotImplementedError:
pass
def test_cliq_error(self):
try:
self.driver._cliq_run_xml("testError", {})
except exception.VolumeBackendAPIException:
pass
| apache-2.0 |
jmesteve/openerpseda | openerp/addons/l10n_es_payment_order/wizard/export_remesas.py | 4 | 9922 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2006 ACYSOS S.L. (http://acysos.com) All Rights Reserved.
# Pedro Tarrafeta <pedro@acysos.com>
# Copyright (c) 2008 Pablo Rocandio. All Rights Reserved.
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# Corregido para instalación TinyERP estándar 4.2.0: Zikzakmedia S.L. 2008
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# Añadidas cuentas de remesas y tipos de pago. 2008
# Pablo Rocandio <salbet@gmail.com>
#
# Rehecho de nuevo para instalación OpenERP 5.0.0 sobre account_payment_extension: Zikzakmedia S.L. 2009
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# Añadidos conceptos extras del CSB 19: Acysos S.L. 2011
# Ignacio Ibeas <ignacio@acysos.com>
#
# Migración de wizard.interface para la 6.1: Pexego Sistemas Informáticos. 2012
# Marta Vázquez Rodríguez <marta@pexego.es>
#
# Refactorización. Acysos S.L. (http://www.acysos.com) 2012
# Ignacio Ibeas <ignacio@acysos.com>
#
# Migración OpenERP 7.0. Acysos S.L. (http://www.acysos.com) 2013
# Ignacio Ibeas <ignacio@acysos.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import base64
from tools.translate import _
from log import *
def _reopen(self, res_id, model):
return {'type': 'ir.actions.act_window',
'view_mode': 'form',
'view_type': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
# save original model in context, because selecting the list of available
# templates requires a model in context
'context': {
'default_model': model,
},
}
# Transient wizard (Python 2 / OpenERP 7 API) that exports a payment order
# as a Spanish CSB 19/32/34/34-01/58 remittance file and attaches it to the
# order. Errors raised as Log are written into the wizard's 'note' field and
# the wizard form is reopened.
class wizard_payment_file_spain(osv.osv_memory):
_name = 'wizard.payment.file.spain'
_columns = {
'join': fields.boolean('Join payment lines of the same partner and bank account'),
'note': fields.text('Log'),
'attach_id':fields.many2one('ir.attachment', 'Payment order file', readonly=True),
}
def create_payment_file(self, cr, uid, ids, context):
converter = self.pool.get('payment.converter.spain')
txt_remesa = ''
num_lineas_opc = 0
form_obj = self.browse(cr, uid, ids)[0]
try:
# The payment order being exported is taken from the active record.
orden = self.pool.get('payment.order').browse(cr, uid, context['active_id'], context)
if not orden.line_ids:
raise Log( _('User error:\n\nWizard can not generate export file, there are not payment lines.'), True )
if orden.create_account_moves == 'direct-payment' and (orden.state != 'open' and orden.state != 'done'):
raise Log( _('User error:\n\nIf direct payment is selected to create the account moves, you should confirm payments befores. Creating the files will make the payments.'), True )
# Check that the company bank account number exists and has 20 digits
if not orden.mode.bank_id:
raise Log( _('User error:\n\nThe bank account of the company %s is not defined.') % (orden.mode.partner_id.name), True )
cc = converter.digits_only(cr,uid,orden.mode.bank_id.acc_number)
if len(cc) != 20:
raise Log( _('User error:\n\nThe bank account number of the company %s has not 20 digits.') % (orden.mode.partner_id.name), True)
# Check that the VAT number of the company linked to the payment
# mode's bank account exists
if not orden.mode.bank_id.partner_id.vat:
raise Log(_('User error:\n\nThe company VAT number related to the bank account of the payment mode is not defined.'), True)
recibos = []
if form_obj.join:
# List of all the distinct partner+bank pairs in the remittance
partner_bank_l = reduce(lambda l, x: x not in l and l.append(x) or l,
[(recibo.partner_id,recibo.bank_id) for recibo in orden.line_ids], [])
# Build the list of receipts grouped by identical partner+bank:
# amounts are summed, texts concatenated with a blank, and the
# latest of each date is kept.
for partner,bank in partner_bank_l:
lineas = [recibo for recibo in orden.line_ids if recibo.partner_id==partner and recibo.bank_id==bank]
recibos.append({
'partner_id': partner,
'bank_id': bank,
'name': partner.ref or str(partner.id),
'amount': reduce(lambda x, y: x+y, [l.amount for l in lineas], 0),
'communication': reduce(lambda x, y: x+' '+(y or ''), [l.name+' '+l.communication for l in lineas], ''),
'communication2': reduce(lambda x, y: x+' '+(y or ''), [l.communication2 for l in lineas], ''),
'date': max([l.date for l in lineas]),
'ml_maturity_date': max([l.ml_maturity_date for l in lineas]),
'create_date': max([l.create_date for l in lineas]),
'ml_date_created': max([l.ml_date_created for l in lineas]),
'ml_inv_ref': [l.ml_inv_ref for l in lineas]
})
else:
# Each payment line becomes its own receipt
for l in orden.line_ids:
recibos.append({
'partner_id': l.partner_id,
'bank_id': l.bank_id,
'name': l.partner_id.ref or str(l.partner_id.id),
'amount': l.amount,
'communication': l.name+' '+l.communication,
'communication2': l.communication2,
'date': l.date,
'ml_maturity_date': l.ml_maturity_date,
'create_date': l.create_date,
'ml_date_created': l.ml_date_created,
'ml_inv_ref':[l.ml_inv_ref]
})
# When the mode requires it, every receipt must carry a valid
# 20-digit customer bank account.
if orden.mode.require_bank_account:
for line in recibos:
ccc = line['bank_id'] and line['bank_id'].acc_number or False
if not ccc:
raise Log(_('User error:\n\nThe bank account number of the customer %s is not defined and current payment mode enforces all lines to have a bank account.') % (line['partner_id'].name), True)
ccc = converter.digits_only(cr,uid,ccc)
if len(ccc) != 20:
raise Log(_('User error:\n\nThe bank account number of the customer %s has not 20 digits.') % (line['partner_id'].name), True)
# Pick the exporter model matching the CSB norm of the payment mode.
if orden.mode.tipo == 'csb_19':
csb = self.pool.get('csb.19')
elif orden.mode.tipo == 'csb_32':
csb = self.pool.get('csb.32')
elif orden.mode.tipo == 'csb_34':
csb = self.pool.get('csb.34')
elif orden.mode.tipo == '34_01':
csb = self.pool.get('csb.3401')
elif orden.mode.tipo == 'csb_58':
csb = self.pool.get('csb.58')
else:
raise Log(_('User error:\n\nThe payment mode is not CSB 19, CSB 32, CSB 34 or CSB 58'), True)
txt_remesa = csb.create_file(cr, uid, orden, recibos, context)
# NOTE(review): 'pay' is written below but is not declared in
# _columns -- presumably defined by an inheriting model; confirm.
except Log, log:
form_obj.write({'note': unicode(log),'pay': False})
return _reopen(self, form_obj.id, 'wizard.payment.file.spain')
else:
# Ensure line breaks use MS-DOS (CRLF) format as standards require.
txt_remesa = txt_remesa.replace('\r\n','\n').replace('\n','\r\n')
file_remesa = base64.encodestring(txt_remesa.encode('utf-8'))
fname = (_('Remittance_%s_%s.txt') %(orden.mode.tipo, orden.reference)).replace('/','-')
# Delete any previous attachment produced by an earlier export
obj_attachment = self.pool.get('ir.attachment')
attachment_ids = obj_attachment.search(cr, uid, [('name', '=', fname), ('res_model', '=', 'payment.order')])
if len(attachment_ids):
obj_attachment.unlink(cr, uid, attachment_ids)
# Attach the freshly generated remittance file to the payment order
attach_id = obj_attachment.create(cr, uid, {
'name': fname,
'datas': file_remesa,
'datas_fname': fname,
'res_model': 'payment.order',
'res_id': orden.id,
}, context=context)
log = _("Successfully Exported\n\nSummary:\n Total amount paid: %.2f\n Total Number of Payments: %d\n") % (orden.total, len(recibos))
self.pool.get('payment.order').set_done(cr, uid, [orden.id], context)
form_obj.write({'note': log,'attach_id':attach_id})
return _reopen(self, form_obj.id, 'wizard.payment.file.spain')
# Old-style OpenERP model registration.
wizard_payment_file_spain()
| agpl-3.0 |
rodrigods/keystone | keystone/common/driver_hints.py | 15 | 2754 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Hints(object):
    """Container for driver hints used when listing entities.

    Hints modify the result of a ``list_<entities>`` driver call: they carry
    filtering terms and an optional list limit. Drivers may honour some, all
    or none of the hints; any filter a driver satisfies must be removed from
    the public ``filters`` list by the driver.

    Each filter entry is a dict with the keys:

    * ``name``: attribute being matched
    * ``value``: value being matched against
    * ``comparator``: one of ``equals``, ``startswith`` or ``endswith``
    * ``case_sensitive``: whether comparison honours case
    * ``type``: always the literal ``'filter'``
    """

    def __init__(self):
        # ``limit`` stays None until set_limit() is called; ``filters`` is a
        # publicly readable/mutable list of filter dicts.
        self.limit = None
        self.filters = []

    def add_filter(self, name, value, comparator='equals',
                   case_sensitive=False):
        """Append a filter term to the public ``filters`` list."""
        self.filters.append(dict(name=name,
                                 value=value,
                                 comparator=comparator,
                                 case_sensitive=case_sensitive,
                                 type='filter'))

    def get_exact_filter_by_name(self, name):
        """Return the 'equals' filter entry for ``name``, or None."""
        matches = (entry for entry in self.filters
                   if entry['type'] == 'filter'
                   and entry['name'] == name
                   and entry['comparator'] == 'equals')
        return next(matches, None)

    def set_limit(self, limit, truncated=False):
        """Record that the listing should be truncated to ``limit`` items."""
        self.limit = dict(limit=limit, type='limit', truncated=truncated)
amita-kapoor/Neural-Networks | Hopfield_img.py | 1 | 7221 | import numpy as np
import logging as lg
import random
import os
import cv2
from copy import deepcopy
def learn(X):
    """Hebbian learning of the Hopfield weight matrix.

    Args:
        X: 2-D array whose rows are flattened bipolar (+1/-1) fundamental
            memories.

    Returns:
        The interconnection matrix per the Hebb rule: the pattern
        correlations with self-connections (diagonal) zeroed, scaled by the
        number of units.
    """
    pattern_count = X.shape[0]
    unit_count = X.shape[-1]
    overlap = np.dot(X.T, X)
    return (overlap - pattern_count * np.eye(unit_count)) / unit_count
def read_data(path):
    """Read fundamental memories stored as whitespace-separated 0/1 text
    files and return them as rows of a bipolar (+1/-1) array.

    Args:
        path: directory containing the ``*.txt`` memory files.

    Returns:
        2-D numpy float array with one flattened memory per row; every 0 in
        the files is mapped to -1 (bipolar encoding). An empty (0, 0) array
        is returned when the directory holds no txt files.
    """
    lg.debug('Veryfying if the data file exists')
    assert os.path.exists(path)
    batch = []
    for fname in os.listdir(path):
        if "txt" not in fname:
            continue  # only plain-text memory files are considered
        with open(os.path.join(path, fname)) as fd:
            # np.fromstring (text mode) and the np.int alias are removed in
            # modern NumPy; parse the whitespace-separated ints explicitly.
            batch.append(np.array(fd.read().split(), dtype=int))
    if not batch:
        return np.zeros((0, 0))
    # Fix: the original sized the array as (len(files)-c, len(batch[1])) but
    # filled it with range(len(files)-1), leaving a stray all-(-1) row (or
    # raising IndexError) whenever the txt/non-txt counts did not line up,
    # and crashed outright with a single memory file (batch[1]).
    arr = np.zeros((len(batch), len(batch[0])))
    for row, memory in enumerate(batch):
        arr[row, :] = memory
    arr[arr == 0] = -1  # make the patterns bipolar
    lg.info('Converted fundamental memories into a 2d array')
    return arr
# Image variant of read_data(): loads every *.jpg in `path` as grayscale,
# resizes to `size`, binarises at a fixed threshold, and returns the images
# flattened into a bipolar (+1/-1) 2-D array. Each image is also shown in a
# blocking OpenCV window (waitKey(0)) until a key is pressed.
def read_imgs(path, size):
"""Returns the content from the directory specified by path converting them to
flattened (1-D) input vectors arranged in an array
Args:
Take the directory where the input vectors are as input
Returns:
Returns the input vectors flattened and in an array"""
lg.debug('Veryfying if the data file exists')
assert os.path.exists(path)
files=os.listdir(path)
batch=[]
i=0
c=0
for f in files:
if not "jpg" in f:
c+=1
continue ## Ensure that we read only jpg files
print('reading image', f)
img = cv2.imread(path+f, 0)
print('read image', f)
# NOTE(review): cv2.resize takes (width, height); callers pass
# (size_m, size_n) -- confirm the intended orientation.
memory = cv2.resize(img, size)
#(thresh, memory_bw) = cv2.threshold(memory, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) # Automatic Otsu threshold
thresh = 150
memory_bw = cv2.threshold(memory, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imshow('image', memory_bw)
cv2.waitKey(0)
cv2.destroyAllWindows()
#print(size)
batch.append(memory_bw.flatten())
#print(batch)
# NOTE(review): as in read_data(), the fill loop runs over len(files)-1
# rows while the array has len(files)-c rows -- off-by-one when the
# directory mixes jpg and non-jpg entries; verify against a pristine copy.
arr=np.zeros((len(files)-c,len(batch[1])))
for i in range(0,len(files)-1): arr[i,:]= batch[i]
arr[arr == 0] = -1 # Making array bipolar
arr[arr == 255] = 1
lg.info('Converted fundamental memories into a 2d array')
return arr
# One-time setup: reads the fundamental memories from `path`, trains the
# Hopfield weight matrix via learn(), and persists both the inputs and the
# weights to text files ('input_file.txt' / 'weight_file.txt') in the CWD.
def init(path, size):
"""Initializing a Hopfield network with fundamental memories read from folder"""
lg.basicConfig(filename='hopfield.log', level=lg.DEBUG, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
#X = read_data(path)
X = read_imgs(path, size)
np.savetxt('input_file.txt', X, fmt='%d') ## Use loadtxt with savetxt to load file
lg.info('Saved input vector in a file')
W = learn(X)
np.savetxt('weight_file.txt', W, fmt='%f')
lg.info('Saved interconnection matrix in a file')
return
def sgnn(H):
    """Bipolar signum activation: +1 for strictly positive input, otherwise
    -1 (note that exactly zero maps to -1)."""
    return 1 if H > 0 else -1
# Asynchronous Hopfield recall: starting from probe pattern `d`, repeatedly
# updates units in random order (sgnn of the local field W[i,:].s) until a
# full sweep produces no change, then returns the converged bipolar pattern
# as int32. `t` counts individual unit flips.
# NOTE(review): with stripped indentation the exact nesting of the
# `t=t+1` / `s_old[i]=s_new[i]` statements (inside the change-detection
# branch) must be confirmed against a pristine copy.
def retreive(W,d):
s_old = deepcopy(d)
s_new=np.zeros(s_old.shape)
flag,t=1,0
while flag:
flag=0
#print('for once more')
# Visit every unit once per sweep, in a fresh random order.
for i in random.sample(range(0,len(d)), len(d)):
s_new[i]=sgnn(np.dot(W[i,:],s_old))
if s_new[i] != s_old[i]:
flag=1
t=t+1
s_old[i] = s_new[i]
print('Retrived in', t, 'iterations')
#print(s_new)
return s_new.astype(np.int32)
def distort(s, d):
    """Return a copy of pattern ``s`` with ``d`` randomly chosen components
    sign-flipped; the original array is left untouched. The Hamming distance
    to the original (number of flipped bits) is printed as a side effect."""
    flipped = deepcopy(s)
    positions = random.sample(range(0, len(s)), d)
    for pos in positions:
        flipped[pos] = -1 * flipped[pos]
    print(sum(np.abs(s - flipped) / 2.0))
    return flipped
def noisy(noise_typ, image):
    """Apply synthetic noise to ``image`` (an H x W x C numpy array).

    Args:
        noise_typ: one of
            "gauss"   - additive Gaussian noise (mean 0, variance 0.1);
            "s&p"     - salt & pepper, ~0.4% of entries forced to 1 or 0;
            "poisson" - Poisson-resampled intensities;
            "speckle" - multiplicative standard-normal noise.
        image: the input image array.

    Returns:
        The noisy image array; any other ``noise_typ`` returns None.
    """
    if noise_typ == "gauss":
        row, col, ch = image.shape
        mean = 0
        var = 0.1
        sigma = var ** 0.5
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        gauss = gauss.reshape(row, col, ch)
        return image + gauss
    elif noise_typ == "s&p":
        s_vs_p = 0.5
        amount = 0.004
        out = np.copy(image)
        # Salt mode: drive a random subset of entries to 1.
        num_salt = np.ceil(amount * image.size * s_vs_p)
        # Fix: index with a *tuple* of per-axis coordinate arrays; indexing
        # with a plain list of arrays was deprecated and is rejected by
        # modern NumPy.
        coords = tuple(np.random.randint(0, i - 1, int(num_salt))
                       for i in image.shape)
        out[coords] = 1
        # Pepper mode: drive another random subset to 0.
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = tuple(np.random.randint(0, i - 1, int(num_pepper))
                       for i in image.shape)
        out[coords] = 0
        # NOTE(review): randint(0, i - 1, ...) never selects the last index
        # of an axis and raises for axes of length 1 -- presumably the
        # original author's intent; confirm before relying on "s&p".
        return out
    elif noise_typ == "poisson":
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        return np.random.poisson(image * vals) / float(vals)
    elif noise_typ == "speckle":
        row, col, ch = image.shape
        gauss = np.random.randn(row, col, ch)
        gauss = gauss.reshape(row, col, ch)
        return image + image * gauss
def display_bipolar_arrays(img_arr):
    """Convert a bipolar (-1/+1) array into displayable 8-bit pixel values:
    -1 becomes 0 (black) and +1 becomes 255 (white).

    Note: ``img_arr`` is modified in place (as before); the returned array
    is a uint8 copy of the remapped values.
    """
    black_mask = img_arr == -1
    white_mask = img_arr == 1
    img_arr[black_mask] = 0
    img_arr[white_mask] = 255
    return img_arr.astype(np.uint8)
# Demo driver: train (or reload) the Hopfield weights, load a probe face
# image, binarise and bipolarise it, flip 200 random pixels, run recall,
# and show original / distorted / retrieved images in OpenCV windows.
if __name__ == "__main__":
fname='weight_file.txt'
path = 'faces/'
size_m, size_n = 60, 60
#path = 'dgt/'
# size_m, size_n = 10,12
if not os.path.isfile(fname): # If not learned already learn the memories
init(path, (size_m, size_n))
print('network created')
lg.info('Loading interconnection matrix from the file')
W = np.loadtxt('weight_file.txt')
#print W
# For Text files
#f='0.txt' ## Loading the file whose distorted image is to presented
#fd = open(path + f)
# memory = np.fromstring(fd.read(), dtype=np.int, sep=' ')
# memory_distorted=distort(memory,30)
# For images
print('Reading input image')
img = cv2.imread('faces/Alison1.jpg', 0)
# Same preprocessing as read_imgs(): resize then fixed-threshold binarise.
memory = cv2.resize(img, (size_m,size_n))
thresh = 150
memory_bw = cv2.threshold(memory, thresh, 255, cv2.THRESH_BINARY)[1]
memory_bw = memory_bw.flatten()
mem_temp = np.asarray(memory_bw, dtype=np.int32)
# Convert to bipolar
mem_temp[mem_temp == 0] = -1
mem_temp[mem_temp == 255] = 1
memory_distorted = distort(mem_temp, 200)
s_retreived = retreive(W,memory_distorted)
# display_bipolar_arrays mutates its argument in place while returning
# the uint8 view used for display.
s_org = display_bipolar_arrays(mem_temp)
s_img = display_bipolar_arrays(s_retreived)
s_distort = display_bipolar_arrays(memory_distorted)
cv2.imshow('Original Image', s_org.reshape(size_m,size_n))
cv2.imshow('Distorted Image', s_distort.reshape(size_m, size_n))
cv2.imshow('Retreived Image', s_img.reshape(size_m, size_n))
cv2.waitKey(0)
cv2.destroyAllWindows()
camptocamp/c2c-rd-addons | account_invoice_2_move/__init__.py | 893 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Teino1978-Corp/google-belay | tests/functional/station_tests.py | 4 | 4525 | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import unittest
from page_models import *
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from belay_test_utils import *
import time
# Selenium functional tests for the Belay "station" UI: instance creation,
# drag-to-category attribute exchange, site suggestions, trash behaviour and
# section display limits. A buzzer test app is expected at localhost:9004.
class StationTests(BelayTest):
def setUp(self):
super(StationTests,self).setUp()
self.ba = open_belay_admin(self.driver)
self.st = self.ba.open_station()
self.st.add_profile("Betsy Claypool", "betsy@gmail.com", "Pennsylvania")
def test_move_section_on_fresh_open(self):
"""
Ensures that moving instances and attribute exchange works immediately
after opening the station.
"""
self.open_new_window("http://localhost:9004")
landing = BuzzerLandingPage(self.driver)
bzr = landing.create_new_instance("Buzz 1")
bzr.close()
self.st.focus()
# Re-open the station from scratch so the move happens on a fresh page.
self.st.close()
self.ba.focus()
self.st = self.ba.open_station()
self.st.personal().set_attributes({
"Name": "Betsy Claypool",
"Location": "Pennsylvania"
})
buzzer_entry = self.st.find_instances_by_name("Buzz 1")[0]
self.st.move_to_category(buzzer_entry, self.st.personal())
js_errors = self.st.get_js_errors()
self.assertEqual(0, len(js_errors), "Found JS Errors: " + str(js_errors))
buzzer_entry = self.st.find_instances_by_name("Buzz 1")[0]
buzzer_entry.open(self.driver)
bzr = BuzzerInstancePage(self.driver)
# NOTE(review): two-argument assertTrue treats the second argument as
# the failure *message*, so these always pass -- assertEqual was
# probably intended here.
self.assertTrue("Betsy Claypool", bzr.get_poster_name_attribute())
self.assertTrue("Pennsylvania", bzr.get_poster_location_attribute())
def test_suggestions_offered(self):
"""
Ensures that suggestions will be offered when known instances exist
for a particular site.
"""
self.open_new_window("http://localhost:9004")
landing = BuzzerLandingPage(self.driver)
bzr = landing.create_new_instance("Buzz")
bzr.close()
self.st.focus()
# Second visit to the same site should now surface the instance.
self.open_new_window("http://localhost:9004")
landing = BuzzerLandingPage(self.driver)
landing.wait_for_suggestions()
self.assertEquals(1, len(landing.get_suggestions()))
self.assertEquals("Buzz", landing.get_suggestions()[0])
landing.open_suggestion("Buzz")
bzr = BuzzerInstancePage(self.driver)
self.assertEquals("Buzz", bzr.get_name())
def test_suggestions_not_offered_for_trash(self):
"""
Ensures that when an instance is moved to the trash, that it will not
be offered as a suggestion when the user opens the site.
"""
self.open_new_window("http://localhost:9004")
landing = BuzzerLandingPage(self.driver)
landing = landing.create_new_instance("Buzz")
landing.close()
self.st.focus()
instance = self.st.find_instances_by_name("Buzz")[0]
instance.delete()
self.open_new_window("http://localhost:9004")
landing = BuzzerLandingPage(self.driver)
self.assertEquals(0, len(landing.get_suggestions()))
# Helper (not a test): creates one buzzer instance and refocuses the station.
def create_buzzer(self, name):
self.open_new_window("http://localhost:9004")
landing = BuzzerLandingPage(self.driver)
inst = landing.create_new_instance(name)
inst.close()
self.st.focus()
def test_max_five_instances_shown_in_sections(self):
for name in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:
self.create_buzzer(name)
instances = [inst for inst in self.st.uncategorized().instances() if inst.is_displayed()]
self.assertTrue(len(instances) == 5)
names = [inst.name() for inst in instances]
# Only the five most recently created instances remain visible.
for name in ['a', 'b']:
self.assertFalse(name in names)
for name in ['c', 'd', 'e', 'f', 'g']:
self.assertTrue(name in names)
# Standard unittest entry point when the module is executed directly.
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
theotherjimmy/yotta | yotta/debug.py | 4 | 1081 | # Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import logging
# validate, , validate things, internal
from .lib import validate
# CMakeGen, , generate build files, internal
from .lib import cmakegen
def addOptions(parser):
    """Register the debug subcommand's arguments on ``parser``: a single
    positional argument naming the program to debug."""
    parser.add_argument(
        'program',
        default=None,
        help='name of the program to be debugged',
    )
# Implements `yotta debug <program>`: validates the current directory is a
# yotta module, resolves the build target, and asks the target to launch its
# debugger on the named program. Returns 1 on failure; returns None (falsy)
# when target.debug() reports no error.
def execCommand(args, following_args):
cwd = os.getcwd()
c = validate.currentDirectoryModule()
if not c:
return 1
target, errors = c.satisfyTarget(args.target)
if errors:
for error in errors:
logging.error(error)
return 1
# Build output lives under build/<targetname>/ relative to the module root.
builddir = os.path.join(cwd, 'build', target.getName())
# !!! FIXME: the program should be specified by the description of the
# current project (or a default value for the program should)
errcode = None
error = target.debug(builddir, args.program)
if error:
logging.error(error)
errcode = 1
return errcode
| apache-2.0 |
kylebebak/Requester | deps/oauthlib/oauth2/rfc6749/endpoints/authorization.py | 10 | 4666 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749 import utils
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class AuthorizationEndpoint(BaseEndpoint):
    """Authorization endpoint - used by the client to obtain authorization
    from the resource owner via user-agent redirection.

    The authorization endpoint is where the resource owner grants or denies
    access.  How the resource owner is authenticated (login form, session
    cookie, ...) is out of scope for RFC 6749; this class only dispatches
    incoming requests to the handler registered for their ``response_type``.

    Per the RFC: the endpoint URI may include an
    "application/x-www-form-urlencoded" query component (`Appendix B`_) but
    no fragment; TLS is required; the HTTP "GET" method must be supported
    and "POST" may be; parameters sent without a value are treated as
    omitted; unrecognized parameters are ignored; and request/response
    parameters must not appear more than once (enforced by the design of
    oauthlib.common.Request).

    .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
    """

    def __init__(self, default_response_type, default_token_type,
                 response_types):
        BaseEndpoint.__init__(self)
        self._response_types = response_types
        self._default_response_type = default_response_type
        self._default_token_type = default_token_type

    @property
    def response_types(self):
        return self._response_types

    @property
    def default_response_type(self):
        return self._default_response_type

    @property
    def default_response_type_handler(self):
        return self.response_types.get(self.default_response_type)

    @property
    def default_token_type(self):
        return self._default_token_type

    @catch_errors_and_unavailability
    def create_authorization_response(self, uri, http_method='GET', body=None,
                                      headers=None, scopes=None,
                                      credentials=None):
        """Extract response_type and route to the designated handler."""
        request = Request(uri, http_method=http_method, body=body,
                          headers=headers)
        request.scopes = scopes
        # TODO: decide whether this should be a required argument
        request.user = None  # TODO: explain this in docs
        for attr, value in (credentials or {}).items():
            setattr(request, attr, value)
        handler = self.response_types.get(request.response_type,
                                          self.default_response_type_handler)
        log.debug('Dispatching response_type %s request to %r.',
                  request.response_type, handler)
        return handler.create_authorization_response(
            request, self.default_token_type)

    @catch_errors_and_unavailability
    def validate_authorization_request(self, uri, http_method='GET', body=None,
                                       headers=None):
        """Extract response_type and route to the designated handler."""
        request = Request(uri, http_method=http_method, body=body,
                          headers=headers)
        request.scopes = utils.scope_to_list(request.scope)
        handler = self.response_types.get(request.response_type,
                                          self.default_response_type_handler)
        return handler.validate_authorization_request(request)
| mit |
mouseratti/guake | guake/settings.py | 4 | 4359 | # -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2012 Lincoln de Sousa <lincoln@minaslivre.org>
Copyright (C) 2007 Gabriel Falcão <gabrielteratos@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
import logging
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio
log = logging.getLogger(__name__)
class Settings():
    """Load every Guake GSettings schema and expose each as an attribute.

    The original implementation repeated the same four-line stanza nine
    times; the schemas are now declared once in ``SCHEMAS`` and loaded in a
    loop.  The public attributes (``self.guake``, ``self.general``, ...)
    are unchanged.
    """

    # (attribute name, schema id) pairs; one Gio.Settings object is created
    # per entry and bound to ``self.<attribute name>``.
    SCHEMAS = (
        ('guake', 'guake'),
        ('general', 'guake.general'),
        ('keybindings', 'guake.keybindings'),
        ('keybindingsGlobal', 'guake.keybindings.global'),
        ('keybindingsLocal', 'guake.keybindings.local'),
        ('styleBackground', 'guake.style.background'),
        ('styleFont', 'guake.style.font'),
        ('style', 'guake.style'),
        ('hooks', 'guake.hooks'),
    )

    def __init__(self, schema_source):
        Settings.enhanceSetting()
        for attr_name, schema_id in self.SCHEMAS:
            settings = Gio.Settings.new_full(
                Gio.SettingsSchemaSource.lookup(schema_source, schema_id, False), None, None
            )
            settings.initEnhancements()
            # Fan every "changed" signal out to the per-key listener registry.
            settings.connect("changed", settings.triggerOnChangedValue)
            setattr(self, attr_name, settings)

    @staticmethod
    def enhanceSetting():
        """Monkey-patch Gio.Settings with a per-key change-listener registry.

        Adds three methods to every Gio.Settings instance:
        ``initEnhancements`` (create the registry), ``onChangedValue``
        (subscribe a callback to one key) and ``triggerOnChangedValue``
        (dispatch a change to the subscribed callbacks).
        """
        def initEnhancements(self):
            # key -> list of callbacks
            self.listeners = dict()

        def onChangedValue(self, key, user_func):
            if key not in self.listeners:
                self.listeners[key] = list()
            self.listeners[key].append(user_func)

        def triggerOnChangedValue(self, settings, key, user_data=None):
            if key in self.listeners:
                for func in self.listeners[key]:
                    func(settings, key, user_data)

        gi.repository.Gio.Settings.initEnhancements = initEnhancements
        gi.repository.Gio.Settings.onChangedValue = onChangedValue
        gi.repository.Gio.Settings.triggerOnChangedValue = triggerOnChangedValue
| gpl-2.0 |
gylian/Sick-Beard | lib/gntp/notifier.py | 122 | 8299 | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
"""
The gntp.notifier module is provided as a simple way to send notifications
using GNTP
.. note::
This class is intended to mostly mirror the older Python bindings such
that you should be able to replace instances of the old bindings with
this class.
`Original Python bindings <http://code.google.com/p/growl/source/browse/Bindings/python/Growl.py>`_
"""
import logging
import platform
import socket
import sys
from gntp.version import __version__
import gntp.core
import gntp.errors as errors
import gntp.shim
__all__ = [
'mini',
'GrowlNotifier',
]
logger = logging.getLogger(__name__)
class GrowlNotifier(object):
    """Helper class to simplify sending Growl messages.

    :param string applicationName: Sending application name
    :param list notifications: List of valid notifications
    :param list defaultNotifications: List of notifications that should be
        enabled by default
    :param string applicationIcon: Icon URL
    :param string hostname: Remote host
    :param integer port: Remote port
    """
    passwordHash = 'MD5'
    socketTimeout = 3

    def __init__(self, applicationName='Python GNTP', notifications=None,
            defaultNotifications=None, applicationIcon=None, hostname='localhost',
            password=None, port=23053):
        # `None` defaults (instead of the original mutable `[]`) avoid the
        # shared-mutable-default pitfall; the lists are copied so callers'
        # lists are never aliased.
        self.applicationName = applicationName
        self.notifications = list(notifications) if notifications else []
        if defaultNotifications:
            self.defaultNotifications = list(defaultNotifications)
        else:
            self.defaultNotifications = self.notifications
        self.applicationIcon = applicationIcon
        self.password = password
        self.hostname = hostname
        self.port = int(port)

    def _checkIcon(self, data):
        '''
        Check the icon to see if it's valid

        If it's a simple URL icon, then we return True. If it's a data icon
        then we return False
        '''
        logger.info('Checking icon')
        return gntp.shim.u(data).startswith('http')

    def register(self):
        """Send GNTP Registration

        .. warning::
            Before sending notifications to Growl, you need to have
            sent a registration message at least once
        """
        logger.info('Sending registration to %s:%s', self.hostname, self.port)
        register = gntp.core.GNTPRegister()
        register.add_header('Application-Name', self.applicationName)
        for notification in self.notifications:
            enabled = notification in self.defaultNotifications
            register.add_notification(notification, enabled)
        if self.applicationIcon:
            if self._checkIcon(self.applicationIcon):
                register.add_header('Application-Icon', self.applicationIcon)
            else:
                # Binary icons travel as an attached resource, not a URL.
                resource = register.add_resource(self.applicationIcon)
                register.add_header('Application-Icon', resource)
        if self.password:
            register.set_password(self.password, self.passwordHash)
        self.add_origin_info(register)
        self.register_hook(register)
        return self._send('register', register)

    def notify(self, noteType, title, description, icon=None, sticky=False,
            priority=None, callback=None, identifier=None, custom=None):
        """Send a GNTP notifications

        .. warning::
            Must have registered with growl beforehand or messages will be ignored

        :param string noteType: One of the notification names registered earlier
        :param string title: Notification title (usually displayed on the notification)
        :param string description: The main content of the notification
        :param string icon: Icon URL path
        :param boolean sticky: Sticky notification
        :param integer priority: Message priority level from -2 to 2
        :param string callback: URL callback
        :param dict custom: Custom attributes. Key names should be prefixed with X-
            according to the spec but this is not enforced by this class

        .. warning::
            For now, only URL callbacks are supported. In the future, the
            callback argument will also support a function
        """
        logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port)
        assert noteType in self.notifications
        notice = gntp.core.GNTPNotice()
        notice.add_header('Application-Name', self.applicationName)
        notice.add_header('Notification-Name', noteType)
        notice.add_header('Notification-Title', title)
        if self.password:
            notice.set_password(self.password, self.passwordHash)
        if sticky:
            notice.add_header('Notification-Sticky', sticky)
        if priority:
            notice.add_header('Notification-Priority', priority)
        if icon:
            if self._checkIcon(icon):
                notice.add_header('Notification-Icon', icon)
            else:
                resource = notice.add_resource(icon)
                notice.add_header('Notification-Icon', resource)
        if description:
            notice.add_header('Notification-Text', description)
        if callback:
            notice.add_header('Notification-Callback-Target', callback)
        if identifier:
            notice.add_header('Notification-Coalescing-ID', identifier)
        # `custom` defaults to None (was a mutable `{}` default).
        for key, value in (custom or {}).items():
            notice.add_header(key, value)
        self.add_origin_info(notice)
        self.notify_hook(notice)
        return self._send('notify', notice)

    def subscribe(self, id, name, port):
        """Send a Subscribe request to a remote machine"""
        sub = gntp.core.GNTPSubscribe()
        sub.add_header('Subscriber-ID', id)
        sub.add_header('Subscriber-Name', name)
        sub.add_header('Subscriber-Port', port)
        if self.password:
            sub.set_password(self.password, self.passwordHash)
        self.add_origin_info(sub)
        self.subscribe_hook(sub)
        return self._send('subscribe', sub)

    def add_origin_info(self, packet):
        """Add optional Origin headers to message"""
        packet.add_header('Origin-Machine-Name', platform.node())
        packet.add_header('Origin-Software-Name', 'gntp.py')
        packet.add_header('Origin-Software-Version', __version__)
        packet.add_header('Origin-Platform-Name', platform.system())
        packet.add_header('Origin-Platform-Version', platform.platform())

    # Subclass extension points; intentionally no-ops here.
    def register_hook(self, packet):
        pass

    def notify_hook(self, packet):
        pass

    def subscribe_hook(self, packet):
        pass

    def _send(self, messagetype, packet):
        """Send the GNTP Packet"""
        packet.validate()
        data = packet.encode()
        logger.debug('To : %s:%s <%s>\n%s', self.hostname, self.port, packet.__class__, data)

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(self.socketTimeout)
        try:
            s.connect((self.hostname, self.port))
            s.send(data)
            recv_data = s.recv(1024)
            while not recv_data.endswith(gntp.shim.b("\r\n\r\n")):
                recv_data += s.recv(1024)
        except socket.error:
            # Python2.5 and Python3 compatible exception
            exc = sys.exc_info()[1]
            raise errors.NetworkError(exc)
        finally:
            # Always release the socket; the original closed it only on the
            # success path, leaking the fd on network/parse errors.
            s.close()

        response = gntp.core.parse_gntp(recv_data)
        logger.debug('From : %s:%s <%s>\n%s', self.hostname, self.port, response.__class__, response)

        if type(response) == gntp.core.GNTPOK:
            return True
        logger.error('Invalid response: %s', response.error())
        return response.error()
def mini(description, applicationName='PythonMini', noteType="Message",
        title="Mini Message", applicationIcon=None, hostname='localhost',
        password=None, port=23053, sticky=False, priority=None,
        callback=None, notificationIcon=None, identifier=None,
        notifierFactory=GrowlNotifier):
    """Single notification function

    Registers and fires one notification in a single call, using reasonable
    defaults for everything except the message itself.

    :param string description: Notification message

    .. warning::
        For now, only URL callbacks are supported. In the future, the
        callback argument will also support a function
    """
    try:
        notifier = notifierFactory(
            applicationName=applicationName,
            notifications=[noteType],
            defaultNotifications=[noteType],
            applicationIcon=applicationIcon,
            hostname=hostname,
            password=password,
            port=port,
        )
        registered = notifier.register()
        if registered is not True:
            return registered
        return notifier.notify(
            noteType=noteType,
            title=title,
            description=description,
            icon=notificationIcon,
            sticky=sticky,
            priority=priority,
            callback=callback,
            identifier=identifier,
        )
    except Exception:
        # "mini" deliberately swallows everything: it is a fire-and-forget
        # convenience and must not disturb the caller.
        logger.exception("Growl error")
if __name__ == '__main__':
    # If we're running this module directly we're likely running it as a test
    # so extra debugging is useful
    logging.basicConfig(level=logging.INFO)
    # Sends one throwaway notification to localhost:23053.
    mini('Testing mini notification')
| gpl-3.0 |
mariaantoanelam/Licenta | Lib/test/test_gl.py | 15 | 6640 | #! /usr/bin/env python
"""Very simple test script for the SGI gl library extension module
taken mostly from the documentation.
Roger E. Masse
"""
from test_support import verbose, TestSkipped
import gl, GL, time
glattrs = ['RGBcolor', 'RGBcursor', 'RGBmode', 'RGBrange', 'RGBwritemask',
'__doc__', '__name__', 'addtopup', 'altgetmatrix', 'arc', 'arcf',
'arcfi', 'arcfs', 'arci', 'arcs', 'attachcursor', 'backbuffer',
'backface', 'bbox2', 'bbox2i', 'bbox2s', 'bgnclosedline', 'bgnline',
'bgnpoint', 'bgnpolygon', 'bgnsurface', 'bgntmesh', 'bgntrim',
'blankscreen', 'blanktime', 'blendfunction', 'blink', 'c3f', 'c3i',
'c3s', 'c4f', 'c4i', 'c4s', 'callobj', 'charstr', 'chunksize', 'circ',
'circf', 'circfi', 'circfs', 'circi', 'circs', 'clear',
'clearhitcode', 'clkoff', 'clkon', 'closeobj', 'cmode', 'cmov',
'cmov2', 'cmov2i', 'cmov2s', 'cmovi', 'cmovs', 'color', 'colorf',
'compactify', 'concave', 'cpack', 'crv', 'crvn', 'curorigin',
'cursoff', 'curson', 'curstype', 'curvebasis', 'curveit',
'curveprecision', 'cyclemap', 'czclear', 'defbasis', 'defcursor',
'deflinestyle', 'delobj', 'deltag', 'depthcue', 'devport', 'dglclose',
'dglopen', 'dither', 'dopup', 'doublebuffer', 'draw', 'draw2',
'draw2i', 'draw2s', 'drawi', 'drawmode', 'draws', 'editobj',
'endclosedline', 'endfullscrn', 'endline', 'endpick', 'endpoint',
'endpolygon', 'endpupmode', 'endselect', 'endsurface', 'endtmesh',
'endtrim', 'finish', 'font', 'foreground', 'freepup', 'frontbuffer',
'fudge', 'fullscrn', 'gRGBcolor', 'gRGBmask', 'gammaramp', 'gbegin',
'gconfig', 'genobj', 'gentag', 'getbackface', 'getbuffer',
'getbutton', 'getcmmode', 'getcolor', 'getcpos', 'getcursor',
'getdcm', 'getdepth', 'getdescender', 'getdisplaymode', 'getdrawmode',
'getfont', 'getgdesc', 'getgpos', 'getheight', 'gethitcode',
'getlsbackup', 'getlsrepeat', 'getlstyle', 'getlwidth', 'getmap',
'getmatrix', 'getmcolor', 'getmmode', 'getmonitor',
'getnurbsproperty', 'getopenobj', 'getorigin', 'getothermonitor',
'getpattern', 'getplanes', 'getport', 'getresetls', 'getscrmask',
'getshade', 'getsize', 'getsm', 'gettp', 'getvaluator', 'getvideo',
'getviewport', 'getwritemask', 'getzbuffer', 'gewrite', 'gflush',
'ginit', 'glcompat', 'greset', 'gselect', 'gsync', 'gversion',
'iconsize', 'icontitle', 'imakebackground', 'initnames', 'ismex',
'isobj', 'isqueued', 'istag', 'keepaspect', 'lRGBrange', 'lampoff',
'lampon', 'linesmooth', 'linewidth', 'lmbind', 'lmcolor', 'lmdef',
'loadmatrix', 'loadname', 'logicop', 'lookat', 'lrectread',
'lrectwrite', 'lsbackup', 'lsetdepth', 'lshaderange', 'lsrepeat',
'makeobj', 'maketag', 'mapcolor', 'mapw', 'mapw2', 'maxsize',
'minsize', 'mmode', 'move', 'move2', 'move2i', 'move2s', 'movei',
'moves', 'multimap', 'multmatrix', 'n3f', 'newpup', 'newtag',
'noborder', 'noise', 'noport', 'normal', 'nurbscurve', 'nurbssurface',
'nvarray', 'objdelete', 'objinsert', 'objreplace', 'onemap', 'ortho',
'ortho2', 'overlay', 'packrect', 'pagecolor', 'pagewritemask',
'passthrough', 'patch', 'patchbasis', 'patchcurves', 'patchprecision',
'pclos', 'pdr', 'pdr2', 'pdr2i', 'pdr2s', 'pdri', 'pdrs',
'perspective', 'pick', 'picksize', 'pixmode', 'pmv', 'pmv2', 'pmv2i',
'pmv2s', 'pmvi', 'pmvs', 'pnt', 'pnt2', 'pnt2i', 'pnt2s', 'pnti',
'pnts', 'pntsmooth', 'polarview', 'polf', 'polf2', 'polf2i', 'polf2s',
'polfi', 'polfs', 'poly', 'poly2', 'poly2i', 'poly2s', 'polyi',
'polys', 'popattributes', 'popmatrix', 'popname', 'popviewport',
'prefposition', 'prefsize', 'pupmode', 'pushattributes', 'pushmatrix',
'pushname', 'pushviewport', 'pwlcurve', 'qdevice', 'qenter', 'qgetfd',
'qread', 'qreset', 'qtest', 'rcrv', 'rcrvn', 'rdr', 'rdr2', 'rdr2i',
'rdr2s', 'rdri', 'rdrs', 'readdisplay', 'readsource', 'rect',
'rectcopy', 'rectf', 'rectfi', 'rectfs', 'recti', 'rects', 'rectzoom',
'resetls', 'reshapeviewport', 'ringbell', 'rmv', 'rmv2', 'rmv2i',
'rmv2s', 'rmvi', 'rmvs', 'rot', 'rotate', 'rpatch', 'rpdr', 'rpdr2',
'rpdr2i', 'rpdr2s', 'rpdri', 'rpdrs', 'rpmv', 'rpmv2', 'rpmv2i',
'rpmv2s', 'rpmvi', 'rpmvs', 'sbox', 'sboxf', 'sboxfi', 'sboxfs',
'sboxi', 'sboxs', 'scale', 'screenspace', 'scrmask', 'setbell',
'setcursor', 'setdepth', 'setlinestyle', 'setmap', 'setmonitor',
'setnurbsproperty', 'setpattern', 'setpup', 'setshade', 'setvaluator',
'setvideo', 'shademodel', 'shaderange', 'singlebuffer', 'smoothline',
'spclos', 'splf', 'splf2', 'splf2i', 'splf2s', 'splfi', 'splfs',
'stepunit', 'strwidth', 'subpixel', 'swapbuffers', 'swapinterval',
'swaptmesh', 'swinopen', 'textcolor', 'textinit', 'textport',
'textwritemask', 'tie', 'tpoff', 'tpon', 'translate', 'underlay',
'unpackrect', 'unqdevice', 'v2d', 'v2f', 'v2i', 'v2s', 'v3d', 'v3f',
'v3i', 'v3s', 'v4d', 'v4f', 'v4i', 'v4s', 'varray', 'videocmd',
'viewport', 'vnarray', 'winattach', 'winclose', 'winconstraints',
'windepth', 'window', 'winget', 'winmove', 'winopen', 'winpop',
'winposition', 'winpush', 'winset', 'wintitle', 'wmpack', 'writemask',
'writepixels', 'xfpt', 'xfpt2', 'xfpt2i', 'xfpt2s', 'xfpt4', 'xfpt4i',
'xfpt4s', 'xfpti', 'xfpts', 'zbuffer', 'zclear', 'zdraw', 'zfunction',
'zsource', 'zwritemask']
def main():
    """Smoke-test the SGI ``gl`` module: touch every attribute, then draw
    a small test window for two seconds.  (Python 2 / IRIX only.)"""
    # insure that we at least have an X display before continuing.
    import os
    try:
        display = os.environ['DISPLAY']
    except:
        raise TestSkipped, "No $DISPLAY -- skipping gl test"
    # touch all the attributes of gl without doing anything
    if verbose:
        print 'Touching gl module attributes...'
    for attr in glattrs:
        if verbose:
            print 'touching: ', attr
        getattr(gl, attr)
    # create a small 'Crisscross' window
    if verbose:
        print 'Creating a small "CrissCross" window...'
        print 'foreground'
    gl.foreground()
    if verbose:
        print 'prefposition'
    gl.prefposition(500, 900, 500, 900)
    if verbose:
        print 'winopen "CrissCross"'
    w = gl.winopen('CrissCross')
    if verbose:
        print 'clear'
    gl.clear()
    if verbose:
        print 'ortho2'
    # Map world coordinates 0..400 onto the window.
    gl.ortho2(0.0, 400.0, 0.0, 400.0)
    if verbose:
        print 'color WHITE'
    gl.color(GL.WHITE)
    if verbose:
        print 'color RED'
    gl.color(GL.RED)
    # First diagonal: bottom-left to top-right (float vertices).
    if verbose:
        print 'bgnline'
    gl.bgnline()
    if verbose:
        print 'v2f'
    gl.v2f(0.0, 0.0)
    gl.v2f(400.0, 400.0)
    if verbose:
        print 'endline'
    gl.endline()
    # Second diagonal: bottom-right to top-left (integer vertices).
    if verbose:
        print 'bgnline'
    gl.bgnline()
    if verbose:
        print 'v2i'
    gl.v2i(400, 0)
    gl.v2i(0, 400)
    if verbose:
        print 'endline'
    gl.endline()
    if verbose:
        print 'Displaying window for 2 seconds...'
    time.sleep(2)
    if verbose:
        print 'winclose'
    gl.winclose(w)
main()
| mit |
0jpq0/kbengine | kbe/res/scripts/common/Lib/idlelib/RemoteDebugger.py | 137 | 12029 | """Support for remote Python debugging.
Some ASCII art to describe the structure:
IN PYTHON SUBPROCESS # IN IDLE PROCESS
#
# oid='gui_adapter'
+----------+ # +------------+ +-----+
| GUIProxy |--remote#call-->| GUIAdapter |--calls-->| GUI |
+-----+--calls-->+----------+ # +------------+ +-----+
| Idb | # /
+-----+<-calls--+------------+ # +----------+<--calls-/
| IdbAdapter |<--remote#call--| IdbProxy |
+------------+ # +----------+
oid='idb_adapter' #
The purpose of the Proxy and Adapter classes is to translate certain
arguments and return values that cannot be transported through the RPC
barrier, in particular frame and traceback objects.
"""
import types
from idlelib import rpc
from idlelib import Debugger
# Module-level debug flag for this file's own tracing.
debugging = 0
# Object ids under which each adapter is registered with the RPC mechanism.
idb_adap_oid = "idb_adapter"
gui_adap_oid = "gui_adapter"
#=======================================
#
# In the PYTHON subprocess:
# id() -> object tables; they keep subprocess-side objects alive so the
# IDLE process can refer to them across the RPC barrier by integer id.
frametable = {}
dicttable = {}
codetable = {}
tracebacktable = {}
def wrap_frame(frame):
    """Remember *frame* under its id() so the IDLE process can refer to it."""
    key = id(frame)
    frametable[key] = frame
    return key
def wrap_info(info):
    "replace info[2], a traceback instance, by its ID"
    if info is None:
        return None
    tb = info[2]
    # Tracebacks cannot cross the RPC barrier; park the object in the
    # table and ship its id instead.
    assert isinstance(tb, types.TracebackType)
    tb_id = id(tb)
    tracebacktable[tb_id] = tb
    return (info[0], info[1], tb_id)
class GUIProxy:
    """Subprocess-side stand-in for the debugger GUI living in IDLE."""

    def __init__(self, conn, gui_adap_oid):
        self.conn = conn
        self.oid = gui_adap_oid

    def interaction(self, message, frame, info=None):
        # Calls rpc.SocketIO.remotecall() via run.MyHandler instance.
        # Frame and traceback objects cannot cross the barrier, so their
        # table ids are sent instead.
        payload = (message, wrap_frame(frame), wrap_info(info))
        self.conn.remotecall(self.oid, "interaction", payload, {})
class IdbAdapter:
    """Subprocess-side RPC adapter around the Idb debugger instance.

    Translates integer ids arriving over the wire back into the real
    frame/dict/code/traceback objects (via the module-level tables) before
    delegating to ``self.idb``, and registers returned objects so their
    ids can be sent back.
    """

    def __init__(self, idb):
        self.idb = idb

    #----------called by an IdbProxy----------

    def set_step(self):
        self.idb.set_step()

    def set_quit(self):
        self.idb.set_quit()

    def set_continue(self):
        self.idb.set_continue()

    def set_next(self, fid):
        frame = frametable[fid]
        self.idb.set_next(frame)

    def set_return(self, fid):
        frame = frametable[fid]
        self.idb.set_return(frame)

    def get_stack(self, fid, tbid):
        frame = frametable[fid]
        if tbid is None:
            tb = None
        else:
            tb = tracebacktable[tbid]
        stack, i = self.idb.get_stack(frame, tb)
        # Frames cannot cross the RPC barrier; ship their table ids instead.
        stack = [(wrap_frame(frame), k) for frame, k in stack]
        return stack, i

    def run(self, cmd):
        # Execute in the subprocess's real __main__ namespace.
        import __main__
        self.idb.run(cmd, __main__.__dict__)

    def set_break(self, filename, lineno):
        msg = self.idb.set_break(filename, lineno)
        return msg

    def clear_break(self, filename, lineno):
        msg = self.idb.clear_break(filename, lineno)
        return msg

    def clear_all_file_breaks(self, filename):
        msg = self.idb.clear_all_file_breaks(filename)
        return msg

    #----------called by a FrameProxy----------

    def frame_attr(self, fid, name):
        frame = frametable[fid]
        return getattr(frame, name)

    def frame_globals(self, fid):
        frame = frametable[fid]
        dict = frame.f_globals
        did = id(dict)
        dicttable[did] = dict
        return did

    def frame_locals(self, fid):
        frame = frametable[fid]
        dict = frame.f_locals
        did = id(dict)
        dicttable[did] = dict
        return did

    def frame_code(self, fid):
        frame = frametable[fid]
        code = frame.f_code
        cid = id(code)
        codetable[cid] = code
        return cid

    #----------called by a CodeProxy----------

    def code_name(self, cid):
        code = codetable[cid]
        return code.co_name

    def code_filename(self, cid):
        code = codetable[cid]
        return code.co_filename

    #----------called by a DictProxy----------

    def dict_keys(self, did):
        # Not part of the RPC surface: dict_keys view objects are not
        # pickleable; dict_keys_list is used instead.  The original raised
        # `NotImplemented(...)`, which itself fails with TypeError because
        # the NotImplemented sentinel is not callable; raise the intended
        # exception type.
        raise NotImplementedError("dict_keys not public or pickleable")

    # Needed until the dict_keys type is finished and pickleable.
    # Will probably need to extend rpc.py:SocketIO._proxify at that time.
    def dict_keys_list(self, did):
        dict = dicttable[did]
        return list(dict.keys())

    def dict_item(self, did, key):
        dict = dicttable[did]
        value = dict[key]
        value = repr(value)  # can't pickle module 'builtins'
        return value

#----------end class IdbAdapter----------
def start_debugger(rpchandler, gui_adap_oid):
    """Start the debugger and its RPC link in the Python subprocess.

    Builds the subprocess side of the split debugger — GUIProxy, Idb and
    IdbAdapter — links them together, and registers the IdbAdapter with
    the RPCServer so requests from the IDLE-side IdbProxy reach it.
    Returns the oid under which the adapter was registered.
    """
    proxy = GUIProxy(rpchandler, gui_adap_oid)
    adapter = IdbAdapter(Debugger.Idb(proxy))
    rpchandler.register(idb_adap_oid, adapter)
    return idb_adap_oid
#=======================================
#
# In the IDLE process:
class FrameProxy:
    """IDLE-process stand-in for a frame object in the subprocess."""

    def __init__(self, conn, fid):
        self._conn = conn
        self._fid = fid
        self._oid = "idb_adapter"
        self._dictcache = {}

    def __getattr__(self, name):
        # Private names are never proxied.
        if name.startswith("_"):
            raise AttributeError(name)
        getters = {
            "f_code": self._get_f_code,
            "f_globals": self._get_f_globals,
            "f_locals": self._get_f_locals,
        }
        if name in getters:
            return getters[name]()
        # Anything else is fetched from the real frame over RPC.
        return self._conn.remotecall(self._oid, "frame_attr",
                                     (self._fid, name), {})

    def _get_f_code(self):
        cid = self._conn.remotecall(self._oid, "frame_code", (self._fid,), {})
        return CodeProxy(self._conn, self._oid, cid)

    def _get_f_globals(self):
        did = self._conn.remotecall(self._oid, "frame_globals",
                                    (self._fid,), {})
        return self._get_dict_proxy(did)

    def _get_f_locals(self):
        did = self._conn.remotecall(self._oid, "frame_locals",
                                    (self._fid,), {})
        return self._get_dict_proxy(did)

    def _get_dict_proxy(self, did):
        # One DictProxy per dict id, cached for the frame's lifetime.
        if did not in self._dictcache:
            self._dictcache[did] = DictProxy(self._conn, self._oid, did)
        return self._dictcache[did]
class CodeProxy:
    """IDLE-process stand-in for a code object in the subprocess."""

    def __init__(self, conn, oid, cid):
        self._conn = conn
        self._oid = oid
        self._cid = cid

    def __getattr__(self, name):
        # Only co_name and co_filename are proxied over RPC.  Any other
        # missing attribute deliberately resolves to None, matching the
        # original's implicit fall-through.
        if name == "co_name":
            return self._conn.remotecall(self._oid, "code_name",
                                         (self._cid,), {})
        if name == "co_filename":
            return self._conn.remotecall(self._oid, "code_filename",
                                         (self._cid,), {})
        return None
class DictProxy:
    """IDLE-process stand-in for a dict (globals/locals) in the subprocess."""

    def __init__(self, conn, oid, did):
        self._conn = conn
        self._oid = oid
        self._did = did

    # 'temporary' until the dict_keys type is a pickleable built-in:
    # keys are fetched as a plain list over RPC.
    def keys(self):
        return self._conn.remotecall(self._oid, "dict_keys_list",
                                     (self._did,), {})

    def __getitem__(self, key):
        return self._conn.remotecall(self._oid, "dict_item",
                                     (self._did, key), {})

    def __getattr__(self, name):
        # No other dict methods are supported across the barrier.
        raise AttributeError(name)
class GUIAdapter:
    """Receives interaction RPCs from the subprocess and drives the GUI."""

    def __init__(self, conn, gui):
        self.conn = conn
        self.gui = gui

    def interaction(self, message, fid, modified_info):
        # Rehydrate the frame id into a FrameProxy before handing off.
        self.gui.interaction(message, FrameProxy(self.conn, fid),
                             modified_info)
class IdbProxy:
    """IDLE-process stand-in for the Idb debugger in the subprocess."""

    def __init__(self, conn, shell, oid):
        self.oid = oid
        self.conn = conn
        self.shell = shell

    def call(self, methodname, *args, **kwargs):
        """Synchronously invoke *methodname* on the remote IdbAdapter."""
        return self.conn.remotecall(self.oid, methodname, args, kwargs)

    def run(self, cmd, locals):
        # Ignores locals on purpose!
        seq = self.conn.asyncqueue(self.oid, "run", (cmd,), {})
        self.shell.interp.active_seq = seq

    def get_stack(self, frame, tbid):
        # Frame and traceback ids travel over the wire, not the objects;
        # rehydrate the returned frame ids into FrameProxy objects.
        raw, index = self.call("get_stack", frame._fid, tbid)
        proxied = [(FrameProxy(self.conn, fid), depth) for fid, depth in raw]
        return proxied, index

    def set_continue(self):
        self.call("set_continue")

    def set_step(self):
        self.call("set_step")

    def set_next(self, frame):
        self.call("set_next", frame._fid)

    def set_return(self, frame):
        self.call("set_return", frame._fid)

    def set_quit(self):
        self.call("set_quit")

    def set_break(self, filename, lineno):
        return self.call("set_break", filename, lineno)

    def clear_break(self, filename, lineno):
        return self.call("clear_break", filename, lineno)

    def clear_all_file_breaks(self, filename):
        return self.call("clear_all_file_breaks", filename)
def start_remote_debugger(rpcclt, pyshell):
    """Start the subprocess debugger and set up the IDLE side of the link.

    Asks the RPCServer to start the Python-subprocess debugger, then wires
    up the IDLE side: IdbProxy -> debugger GUI -> GUIAdapter.  The
    GUIAdapter is registered with the RPCClient so interaction requests
    from the subprocess GUIProxy can reach the GUI; execution requests
    flow the other way through the IdbProxy.  Returns the debugger GUI.
    """
    global idb_adap_oid

    idb_adap_oid = rpcclt.remotecall("exec", "start_the_debugger",
                                     (gui_adap_oid,), {})
    idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
    gui = Debugger.Debugger(pyshell, idb_proxy)
    rpcclt.register(gui_adap_oid, GUIAdapter(rpcclt, gui))
    return gui
def close_remote_debugger(rpcclt):
    """Shut down the subprocess debugger and the IDLE side of the RPC link.

    Unregistering the GUIAdapter lets the IDLE-process debugger and link
    objects be garbage collected.  (The second reference to the debugger
    GUI is dropped in PyShell.close_remote_debugger().)
    """
    close_subprocess_debugger(rpcclt)
    rpcclt.unregister(gui_adap_oid)
def close_subprocess_debugger(rpcclt):
    """Ask the RPCServer to tear down the subprocess-side debugger."""
    rpcclt.remotecall("exec", "stop_the_debugger", (idb_adap_oid,), {})
def restart_subprocess_debugger(rpcclt):
    """Restart the subprocess debugger; the oid must match the original."""
    fresh_oid = rpcclt.remotecall("exec", "start_the_debugger",
                                  (gui_adap_oid,), {})
    assert fresh_oid == idb_adap_oid, 'Idb restarted with different oid'
| lgpl-3.0 |
KohlsTechnology/ansible | lib/ansible/modules/database/vertica/vertica_role.py | 55 | 7933 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: vertica_role
version_added: '2.0'
short_description: Adds or removes Vertica database roles and assigns roles to them.
description:
- Adds or removes Vertica database role and, optionally, assign other roles.
options:
name:
description:
- Name of the role to add or remove.
required: true
assigned_roles:
description:
- Comma separated list of roles to assign to the role.
aliases: ['assigned_role']
state:
description:
- Whether to create C(present), drop C(absent) or lock C(locked) a role.
choices: ['present', 'absent']
default: present
db:
description:
- Name of the Vertica database.
cluster:
description:
- Name of the Vertica cluster.
default: localhost
port:
description:
- Vertica cluster port to connect to.
default: 5433
login_user:
description:
- The username used to authenticate with.
default: dbadmin
login_password:
description:
- The password used to authenticate with.
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica role
vertica_role: name=role_name db=db_name state=present
- name: creating a new vertica role with other role assigned
vertica_role: name=role_name assigned_role=other_role_name state=present
"""
import traceback
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
    """Raised when a requested operation is not supported by this module."""
class CannotDropError(Exception):
    """Raised when a role cannot be dropped."""
# module specific functions
def get_role_facts(cursor, role=''):
    """Return a dict mapping lowercased role name to its facts.

    Each value is ``{'name': <original name>, 'assigned_roles': [...]}``.
    An empty *role* matches every role in the database.
    """
    cursor.execute("""
        select r.name, r.assigned_roles
        from roles r
        where (? = '' or r.name ilike ?)
    """, role, role)
    facts = {}
    rows = cursor.fetchmany(100)
    while rows:
        for row in rows:
            assigned = []
            if row.assigned_roles:
                assigned = row.assigned_roles.replace(' ', '').split(',')
            facts[row.name.lower()] = {'name': row.name,
                                       'assigned_roles': assigned}
        rows = cursor.fetchmany(100)
    return facts
def update_roles(role_facts, cursor, role,
                 existing, required):
    """Revoke assigned roles no longer required and grant newly required ones."""
    have, want = set(existing), set(required)
    for dropped in have - want:
        cursor.execute("revoke {0} from {1}".format(dropped, role))
    for added in want - have:
        cursor.execute("grant {0} to {1}".format(added, role))
def check(role_facts, role, assigned_roles):
    """Return True when *role* exists and carries exactly *assigned_roles*.

    An empty *assigned_roles* only checks for the role's existence.
    """
    facts = role_facts.get(role.lower())
    if facts is None:
        return False
    if assigned_roles and sorted(assigned_roles) != sorted(facts['assigned_roles']):
        return False
    return True
def present(role_facts, cursor, role, assigned_roles):
    """Ensure *role* exists with *assigned_roles*; return True when changed."""
    role_key = role.lower()
    if role_key not in role_facts:
        # Brand new role: create it, grant everything, refresh the cache.
        cursor.execute("create role {0}".format(role))
        update_roles(role_facts, cursor, role, [], assigned_roles)
        role_facts.update(get_role_facts(cursor, role))
        return True
    current = role_facts[role_key]['assigned_roles']
    if not assigned_roles or sorted(assigned_roles) == sorted(current):
        # Nothing to reconcile.
        return False
    update_roles(role_facts, cursor, role, current, assigned_roles)
    role_facts.update(get_role_facts(cursor, role))
    return True
def absent(role_facts, cursor, role, assigned_roles):
    """Drop *role* (revoking its assignments first); return True when changed."""
    role_key = role.lower()
    if role_key not in role_facts:
        return False
    update_roles(role_facts, cursor, role,
                 role_facts[role_key]['assigned_roles'], [])
    cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
    del role_facts[role_key]
    return True
# module logic
def main():
    """Ansible entry point: ensure the requested Vertica role state.

    Connects over ODBC, reconciles the role to ``present``/``absent`` and
    exits through ``module.exit_json`` / ``module.fail_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(required=True, aliases=['name']),
            assigned_roles=dict(default=None, aliases=['assigned_role']),
            state=dict(default='present', choices=['absent', 'present']),
            db=dict(default=None),
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None, no_log=True),
        ), supports_check_mode=True)

    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")

    role = module.params['role']
    assigned_roles = []
    if module.params['assigned_roles']:
        assigned_roles = module.params['assigned_roles'].split(',')
    # BUGFIX: use a list comprehension instead of filter(). On Python 3
    # filter() returns a one-shot iterator; present() sorts it once and then
    # update_roles() iterates it again, silently seeing an empty sequence.
    assigned_roles = [r for r in assigned_roles if r]
    state = module.params['state']
    db = ''
    if module.params['db']:
        db = module.params['db']

    changed = False

    try:
        # Build the ODBC DSN; autocommit so DDL takes effect immediately.
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
        ).format(module.params['cluster'], module.params['port'], db,
                 module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))

    try:
        role_facts = get_role_facts(cursor)
        if module.check_mode:
            # Check mode: only report whether a change *would* be made.
            changed = not check(role_facts, role, assigned_roles)
        elif state == 'absent':
            try:
                changed = absent(role_facts, cursor, role, assigned_roles)
            except pyodbc.Error as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        elif state == 'present':
            try:
                changed = present(role_facts, cursor, role, assigned_roles)
            except pyodbc.Error as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
    except CannotDropError as e:
        module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})


if __name__ == '__main__':
    main()
| gpl-3.0 |
Manazius/blacksmith-bot | extensions/networktime.py | 3 | 1686 | # BS mark.1-55
# /* coding: utf-8 */
# BlackSmith plugin
# cipher_plugin.py
# Author: ferym (ferym@jabbim.org.ru)
# http://jabbrik.ru
strip_tags = re.compile(r'<[^<>]+>')
def handler_msk_time(type, source, body):
    """Reply with the current Moscow time scraped from zln.ru.

    NOTE(review): ``re_search``, ``read_url``, ``replace_all`` and ``reply``
    are presumably injected by the bot framework at plugin load time -- they
    are not defined in this file; confirm before refactoring.
    """
    try:
        # Scrape the server-time <div> from the remote page (windows-1251 encoded).
        body = re_search(read_url('http://www.zln.ru/time/', 'Mozilla/5.0'), '<div id="servertime" style="margin-top:40px; margin-bottom:30px; height:44px; padding:6px; width:148px; border:2px dotted #990000; font-size:36px; font-weight:bold;">', '</div>')
        # Strip any remaining HTML tags / line breaks from the captured text.
        body = strip_tags.sub('', replace_all(body, {'<br />': '', '<br>': ''}))
        # Local time components, formatted piecewise for the reply below.
        day, mes, god, mes2, chas, minut, sek = time.strftime('%d.'), time.strftime('(%B)'), time.strftime('%Y'),time.strftime('%m.'), time.strftime('%H:'), time.strftime('%M:'), time.strftime('%S')
        # Russian weekday names, Monday first (matches time.localtime()[6]).
        week = [u'понедельник', u'вторник', u'среда', u'четверг', u'пятница', u'суббота', u'воскресенье']
        repl = u'Точное время:\n'
        repl += u'Время: %s' % (unicode(body, 'windows-1251'))
        repl += u'\nЧисло: %s' % (day)
        repl += u'(%s' % (week[time.localtime()[6]])
        repl += u')\nМесяц: %s' % (mes2+mes)
        repl += u'\nГод: %s' % (god)
        repl += u'\n-----'
        repl += u'\nВремя на локальном сервере: %s' % (chas+minut+sek)
        repl += u'\n-----'
        repl += u'\n[%s' % unicode(body, 'windows-1251')
        repl += u' %s' % (day+mes2+god)
        repl += u'] GMT +'
        # Timezone offset in whole hours; [1:] drops the sign character.
        repl += str(int(time.timezone)/int(3600))[1:]
        if time.localtime()[8] == 1:
            repl += u' (Летнее время)'
        else:
            repl += u' (Зимнее время)'
    except:
        # Deliberate best-effort: any scrape/parse failure yields a stock answer.
        repl = u'не вышло'
    reply(type, source, repl)
command_handler(handler_msk_time, 10, "networktime")
| apache-2.0 |
cloudbase/nova | nova/cmd/all.py | 5 | 3283 | # Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for all nova services.
This script attempts to start all the nova services in one process. Each
service is started in its own greenthread. Please note that exceptions and
sys.exit() on the starting of a service are logged and the script will
continue attempting to launch the rest of the services.
"""
import sys
from oslo_log import log as logging
import nova.conf
from nova import config
from nova.i18n import _LE, _LW
from nova import objects
from nova import service
from nova import utils
from nova.vnc import xvp_proxy
CONF = nova.conf.CONF
def main():
    """Launch every enabled nova service inside this single process.

    Each service is launched through a single process launcher; a failure to
    start any one service is logged and the remaining services are still
    attempted (see the exception handlers below).
    """
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    LOG = logging.getLogger('nova.all')
    utils.monkey_patch()
    objects.register_all()
    launcher = service.process_launcher()
    # TODO(sdague): Remove in O
    LOG.warning(_LW('The nova-all entrypoint is deprecated and will '
                    'be removed in a future release'))
    # nova-api
    for api in CONF.enabled_apis:
        try:
            should_use_ssl = api in CONF.enabled_ssl_apis
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            launcher.launch_service(server, workers=server.workers or 1)
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s-api'), api)
    # WSGI proxies (currently only the XVP VNC proxy).
    for mod in [xvp_proxy]:
        try:
            launcher.launch_service(mod.get_wsgi_server())
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), mod.__name__)
    # RPC-based services.
    for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
                   'nova-cert', 'nova-conductor']:
        # FIXME(sirp): Most service configs are defined in nova/service.py, but
        # conductor has set a new precedent of storing these configs
        # nova/<service>/api.py.
        #
        # We should update the existing services to use this new approach so we
        # don't have to treat conductor differently here.
        if binary == 'nova-conductor':
            topic = CONF.conductor.topic
            manager = CONF.conductor.manager
        else:
            topic = None
            manager = None
        try:
            launcher.launch_service(service.Service.create(binary=binary,
                                                           topic=topic,
                                                           manager=manager))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)
    # Block until all launched services exit.
    launcher.wait()
| apache-2.0 |
plilja/adventofcode | 2015/day07/day07.py | 1 | 1315 | import sys
# Advent of Code 2015, day 7: evaluate a circuit of named 16-bit wires
# described by instructions read from stdin.
wires = {}
# Memo table: wire name -> already-computed signal (cleared between parts).
computed = {}
# Seed every literal number as a "wire" that returns itself.
for i in range(0, 65536):
    # The inner def binds the current i by value, avoiding the classic
    # late-binding closure pitfall of a bare `lambda: i`.
    def f(j):
        return lambda: j
    wires[str(i)] = f(i)
for line in sys.stdin.readlines():
    # Wrap each wire's thunk with memoization so the lazy evaluation of the
    # circuit does not blow up exponentially.
    def cache(key, f):
        def g():
            if key in computed:
                return computed[key]
            else:
                r = f()
                computed[key] = r
                return r
        wires[key] = g
    # Translate one instruction into a lazy thunk; all arithmetic is mod 2**16.
    def parse(args):
        if args[0] == 'NOT':
            cache(args[3], lambda: ~(wires[args[1]]()) % 65536)
        elif args[1] == '->':
            if args[0].isdigit():
                cache(args[2], lambda: int(args[0]) % 65536)
            else:
                cache(args[2], lambda: wires[args[0]]() % 65536)
        elif args[1] == 'AND':
            cache(args[4], lambda: (wires[args[0]]() & wires[args[2]]()) % 65536)
        elif args[1] == 'OR':
            cache(args[4], lambda: (wires[args[0]]() | wires[args[2]]()) % 65536)
        elif args[1] == 'LSHIFT':
            cache(args[4], lambda: (wires[args[0]]() << int(args[2])) % 65536)
        elif args[1] == 'RSHIFT':
            cache(args[4], lambda: (wires[args[0]]() >> int(args[2])) % 65536)
    parse(line.split())
# Part 1: evaluate wire 'a'.
step1 = wires['a']()
# Part 2: reset the memo, override wire 'b' with part 1's answer, re-evaluate.
computed.clear()
wires['b'] = lambda: step1
step2 = wires['a']()
print(step1)
print(step2)
| gpl-3.0 |
numenta/nupic | tests/integration/nupic/opf/opf_checkpoint_stress_test.py | 10 | 4775 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This is a stress test that saves and loads an OPF checkpoint multiple times,
doing one compute step in between. This test was put in place to catch a crash
bug.
"""
import datetime
import numpy.random
import os
import shutil
import tempfile
import unittest2 as unittest
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
# Model parameters derived from the Hotgym anomaly example. This example was
# used because it uses the most components. Some of the parameters, such
# as columnCount were reduced to make the test run faster.
MODEL_PARAMS = {
'model': "HTMPrediction",
'version': 1,
'aggregationInfo': { 'days': 0,
'fields': [(u'c1', 'sum'), (u'c0', 'first')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
'modelParams': {
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
'verbosity' : 0,
'encoders': {
u'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'maxval': 100.0,
'minval': 0.0,
'n': 50,
'name': u'c1',
'type': 'ScalarEncoder',
'w': 21},},
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
'spVerbosity' : 0,
'globalInhibition': 1,
'spatialImp' : 'cpp',
'columnCount': 512,
'inputWidth': 0,
'numActiveColumnsPerInhArea': 20,
'seed': 1956,
'potentialPct': 0.5,
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.005,
},
'tmEnable' : True,
'tmParams': {
'verbosity': 0,
'columnCount': 512,
'cellsPerColumn': 8,
'inputWidth': 512,
'seed': 1960,
'temporalImp': 'cpp',
'newSynapseCount': 10,
'maxSynapsesPerSegment': 20,
'maxSegmentsPerCell': 32,
'initialPerm': 0.21,
'permanenceInc': 0.1,
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
'minThreshold': 4,
'activationThreshold': 6,
'outputType': 'normal',
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
'verbosity' : 0,
'alpha': 0.005,
'steps': '1,5',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': 2184},
'trainSPNetOnlyIfRequested': False,
},
}
class CheckpointStressTest(TestCaseBase):
    """Repeatedly save/load an OPF model checkpoint with compute in between.

    Regression test for a crash bug (see module docstring).
    """
    def testCheckpoint(self):
        """Run 20 save/load cycles, two compute steps per cycle."""
        tmpDir = tempfile.mkdtemp()
        model = ModelFactory.create(MODEL_PARAMS)
        model.enableInference({'predictedField': 'consumption'})
        headers = ['timestamp', 'consumption']
        # Now do a bunch of small load/train/save batches
        for _ in range(20):
            for _ in range(2):
                # Feed one random record; the value itself is irrelevant.
                record = [datetime.datetime(2013, 12, 12), numpy.random.uniform(100)]
                modelInput = dict(zip(headers, record))
                model.run(modelInput)
            # Save and load a checkpoint after each batch. Clean up.
            tmpBundleName = os.path.join(tmpDir, "test_checkpoint")
            self.assertIs(model.save(tmpBundleName), None, "Save command failed.")
            model = ModelFactory.loadFromCheckpoint(tmpBundleName)
            shutil.rmtree(tmpBundleName)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
Distrotech/intellij-community | python/helpers/docutils/readers/pep.py | 61 | 1554 | # $Id: pep.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
from docutils.readers import standalone
from docutils.transforms import peps, references, misc, frontmatter
from docutils.parsers import rst
class Reader(standalone.Reader):
    """Docutils reader for PEP (Python Enhancement Proposal) documents."""
    supported = ('pep',)
    """Contexts this reader supports."""
    settings_spec = (
        'PEP Reader Option Defaults',
        'The --pep-references and --rfc-references options (for the '
        'reStructuredText parser) are on by default.',
        ())
    config_section = 'pep reader'
    config_section_dependencies = ('readers', 'standalone reader')
    def get_transforms(self):
        """Return the standalone transforms with PEP-specific replacements."""
        transforms = standalone.Reader.get_transforms(self)
        # We have PEP-specific frontmatter handling.
        transforms.remove(frontmatter.DocTitle)
        transforms.remove(frontmatter.SectionSubTitle)
        transforms.remove(frontmatter.DocInfo)
        transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes])
        return transforms
    # PEP documents always resolve PEP/RFC auto-references.
    settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
    inliner_class = rst.states.Inliner
    def __init__(self, parser=None, parser_name=None):
        """`parser` should be ``None``."""
        if parser is None:
            # rfc2822=1 enables RFC-2822 style header parsing used by PEPs.
            parser = rst.Parser(rfc2822=1, inliner=self.inliner_class())
        standalone.Reader.__init__(self, parser, '')
| apache-2.0 |
OPM/ResInsight | GrpcInterface/Python/rips/PythonExamples/create_wbs_plot.py | 1 | 1383 | import os
import grpc
# Load ResInsight Processing Server Client Library
import rips
# Connect to ResInsight instance
resInsight = rips.Instance.find()
# Get all GeoMech cases
cases = resInsight.project.descendants(rips.GeoMechCase)
# Get all well paths
well_paths = resInsight.project.well_paths()
# Ensure there's at least one well path
if len(well_paths) < 1:
    print("No well paths in project")
    exit(1)
# Create a set of WbsParameters
params = rips.WbsParameters()
params.user_poisson_ratio = 0.23456
params.user_ucs = 123
# Loop through all cases
for case in cases:
    assert isinstance(case, rips.GeoMechCase)
    min_res_depth, max_res_depth = case.reservoir_depth_range()
    # Find a good output path
    case_path = case.file_path
    folder_name = os.path.dirname(case_path)
    # Import formation names
    # NOTE(review): hard-coded Windows path to regression-test data -- this
    # example only works on a machine with that checkout; adjust as needed.
    case.import_formation_names(
        formation_files=[
            "D:/Projects/ResInsight-regression-test/ModelData/norne/Norne_ATW2013.lyr"
        ]
    )
    # create a folder to hold the snapshots
    dirname = os.path.join(folder_name, "snapshots")
    print("Exporting to: " + dirname)
    for well_path in well_paths[0:4]:  # Loop through the first four well paths
        # Create plot with parameters
        wbsplot = case.create_well_bore_stability_plot(
            well_path=well_path.name, time_step=0, parameters=params
        )
| gpl-3.0 |
anfedorov/psutil | examples/disk_usage.py | 44 | 2031 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
List all mounted disk partitions a-la "df -h" command.
$ python examples/disk_usage.py
Device Total Used Free Use % Type Mount
/dev/sdb3 18.9G 14.7G 3.3G 77% ext4 /
/dev/sda6 345.9G 83.8G 244.5G 24% ext4 /home
/dev/sda1 296.0M 43.1M 252.9M 14% vfat /boot/efi
/dev/sda2 600.0M 312.4M 287.6M 52% fuseblk /media/Recovery
"""
import sys
import os
import psutil
from psutil._compat import print_
def bytes2human(n):
    """Convert a byte count into a human-readable string.

    Based on http://code.activestate.com/recipes/578019

    >>> bytes2human(10000)
    '9.8K'
    >>> bytes2human(100001221)
    '95.4M'
    """
    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Walk from the largest unit down; the first factor that fits wins.
    for exponent, unit in reversed(list(enumerate(units, start=1))):
        factor = 1 << exponent * 10
        if n >= factor:
            return '%.1f%s' % (n / float(factor), unit)
    return "%sB" % n
def main():
    """Print a df(1)-style table of all mounted disk partitions."""
    templ = "%-17s %8s %8s %8s %5s%% %9s  %s"
    print_(templ % ("Device", "Total", "Used", "Free", "Use ", "Type",
                    "Mount"))
    for part in psutil.disk_partitions(all=False):
        if os.name == 'nt':
            if 'cdrom' in part.opts or part.fstype == '':
                # skip cd-rom drives with no disk in it; they may raise
                # ENOENT, pop-up a Windows GUI error for a non-ready
                # partition or just hang.
                continue
        usage = psutil.disk_usage(part.mountpoint)
        print_(templ % (
            part.device,
            bytes2human(usage.total),
            bytes2human(usage.used),
            bytes2human(usage.free),
            int(usage.percent),
            part.fstype,
            part.mountpoint))


if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
dapuck/pyleus | pyleus/storm/__init__.py | 7 | 1740 | """Package containing pyleus implementation of major Storm entities.
"""
from __future__ import absolute_import
from collections import namedtuple
DEFAULT_STREAM = "default"
LOG_TRACE = 0
LOG_DEBUG = 1
LOG_INFO = 2
LOG_WARN = 3
LOG_ERROR = 4
StormTuple = namedtuple('StormTuple', "id comp stream task values")
"""Namedtuple representing a Storm tuple.
* **id**\(``str`` or ``long``): tuple identifier
* **comp**\(``str``): name of the emitting component
* **stream**\(``str``): name of the input stream the tuple belongs to
* **values**\(``tuple``): values contained by the tuple
"""
def is_tick(tup):
    """Tell whether the tuple is a tick tuple or not.

    :param tup: tuple to investigate
    :type tup: :class:`~.StormTuple`
    :return: ``True`` if the tuple is a tick tuple, ``False`` otherwise
    :rtype: ``bool``
    """
    # Tick tuples (generated by Storm; introduced 0.8) come from the
    # __system component on the __tick stream.
    return (tup.comp, tup.stream) == ('__system', '__tick')
def is_heartbeat(tup):
    """Tell whether the tuple is a heartbeat tuple or not.

    :param tup: tuple to investigate
    :type tup: :class:`~.StormTuple`
    :return: ``True`` if the tuple is a heartbeat tuple, ``False`` otherwise
    :rtype: ``bool``
    """
    # Heartbeats arrive on the __heartbeat stream with a task id of -1.
    return tup.stream == '__heartbeat' and tup.task == -1
class StormWentAwayError(Exception):
    """Raised when the connection between the component and Storm terminates.
    """
    def __init__(self):
        # The message is fixed: this error always means Storm closed the pipe.
        super(StormWentAwayError, self).__init__(
            "Got EOF while reading from Storm")
from pyleus.storm.bolt import Bolt, SimpleBolt
from pyleus.storm.spout import Spout
_ = [Bolt, SimpleBolt, Spout] # pyflakes
| apache-2.0 |
hpssjellis/deeplearnjs-javascript-examples | Unordered-tensorflow-examples/aymericdamien-Examples/examples/linear_regression.py | 7 | 2600 | '''
A linear regression learning algorithm example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
# NOTE(review): legacy TensorFlow 0.x API (tf.mul, initialize_all_variables)
# and Python 2 print statements -- this example does not run on TF 1.x+/Py3.
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model: y = W*x + b
activation = tf.add(tf.mul(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Fit all training data (stochastic updates, one sample at a time)
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        #Display logs per epoch step
        if epoch % display_step == 0:
            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
                "W=", sess.run(W), "b=", sess.run(b)
    print "Optimization Finished!"
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
    # Testing example, as requested (Issue #2)
    test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
    test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
    print "Testing... (L2 loss Comparison)"
    testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
                            feed_dict={X: test_X, Y: test_Y}) #same function as cost above
    print "Testing cost=", testing_cost
    print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
    #Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(test_X, test_Y, 'bo', label='Testing data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
plt.show() | mit |
sup95/zulip | confirmation/views.py | 31 | 1660 | # -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: views.py 21 2008-12-05 09:21:03Z jarek.zgoda $'
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from confirmation.models import Confirmation
from zerver.models import PreregistrationUser
from zproject.jinja2 import render_to_response
def confirm(request, confirmation_key):
    # type: (HttpRequest, str) -> HttpResponse
    """Render the confirmation page for *confirmation_key*.

    Falls back to a generic template with ``confirmed=False`` when the key
    is invalid or expired.
    """
    confirmation_key = confirmation_key.lower()
    obj = Confirmation.objects.confirm(confirmation_key)
    confirmed = True
    if not obj:
        # confirmation failed
        confirmed = False
        try:
            # try to get the object we were supposed to confirm
            obj = Confirmation.objects.get(confirmation_key=confirmation_key)
        except Confirmation.DoesNotExist:
            pass
    ctx = {
        'object': obj,
        'confirmed': confirmed,
        'days': getattr(settings, 'EMAIL_CONFIRMATION_DAYS', 10),
        'key': confirmation_key,
        'full_name': request.GET.get("full_name", None),
        'support_email': settings.ZULIP_ADMINISTRATOR,
        'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
    }
    templates = [
        'confirmation/confirm.html',
    ]
    if obj and isinstance(obj, (PreregistrationUser, Confirmation)):
        # if we have an object, we can use specific template
        templates.insert(0, 'confirmation/confirm_%s.html' % (obj._meta.model_name,))
    return render_to_response(templates, ctx, request=request)
| apache-2.0 |
amolenaar/gaphor | gaphor/SysML/blocks/block.py | 1 | 4366 | from gaphor.core import gettext
from gaphor.core.modeling.properties import attribute
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import (
Box,
EditableText,
FontStyle,
FontWeight,
Text,
TextAlign,
VerticalAlign,
draw_border,
draw_top_separator,
)
from gaphor.diagram.support import represents
from gaphor.SysML.sysml import Block
from gaphor.UML.classes.klass import (
attribute_watches,
stereotype_compartments,
stereotype_watches,
)
from gaphor.UML.modelfactory import stereotypes_str
from gaphor.UML.umlfmt import format_attribute
@represents(Block)
class BlockItem(ElementPresentation[Block], Classified):
    """Diagram presentation for a SysML Block.

    Renders the block name header plus optional "parts", "references" and
    stereotype compartments, rebuilt by update_shapes() whenever a watched
    model property changes.
    """
    def __init__(self, id=None, model=None):
        super().__init__(id, model)
        # Rebuild the shape tree whenever any of these model paths change.
        self.watch("show_stereotypes", self.update_shapes).watch(
            "show_parts", self.update_shapes
        ).watch("show_references", self.update_shapes).watch(
            "subject[NamedElement].name"
        ).watch(
            "subject[NamedElement].namespace.name"
        ).watch(
            "subject[Classifier].isAbstract", self.update_shapes
        ).watch(
            "subject[Class].ownedAttribute.aggregation", self.update_shapes
        )
        attribute_watches(self, "Block")
        stereotype_watches(self)
    # Persisted display toggles for the optional compartments.
    show_stereotypes: attribute[int] = attribute("show_stereotypes", int)
    show_parts: attribute[int] = attribute("show_parts", int, default=False)
    show_references: attribute[int] = attribute("show_references", int, default=False)
    def update_shapes(self, event=None):
        """Rebuild the declarative shape tree from the current model state."""
        self.shape = Box(
            # Header: stereotype line, name (bold, italic if abstract), package.
            Box(
                Text(
                    text=lambda: stereotypes_str(self.subject, ["block"]),
                    style={"min-width": 0, "min-height": 0},
                ),
                EditableText(
                    text=lambda: self.subject.name or "",
                    width=lambda: self.width - 4,
                    style={
                        "font-weight": FontWeight.BOLD,
                        "font-style": FontStyle.ITALIC
                        if self.subject and self.subject.isAbstract
                        else FontStyle.NORMAL,
                    },
                ),
                Text(
                    text=lambda: from_package_str(self),
                    style={"font-size": 10, "min-width": 0, "min-height": 0},
                ),
                style={"padding": (12, 4, 12, 4)},
            ),
            # The `cond and [item] or []` idiom yields an optional child list
            # that is unpacked into the Box arguments.
            *(
                self.show_parts
                and self.subject
                and [
                    self.block_compartment(
                        gettext("parts"),
                        lambda a: a.association and a.aggregation == "composite",
                    )
                ]
                or []
            ),
            *(
                self.show_references
                and self.subject
                and [
                    self.block_compartment(
                        gettext("references"),
                        lambda a: a.association and a.aggregation != "composite",
                    )
                ]
                or []
            ),
            *(self.show_stereotypes and stereotype_compartments(self.subject) or []),
            style={
                "min-width": 100,
                "min-height": 50,
                "vertical-align": VerticalAlign.TOP,
            },
            draw=draw_border,
        )
    def block_compartment(self, name, predicate):
        """Build a titled compartment of owned attributes matching *predicate*."""
        # We need to fix the attribute value, since the for loop changes it.
        def lazy_format(attribute):
            return lambda: format_attribute(attribute) or gettext("unnamed")
        return Box(
            Text(
                text=name,
                style={
                    "padding": (0, 0, 4, 0),
                    "font-size": 10,
                    "font-style": FontStyle.ITALIC,
                },
            ),
            *(
                Text(text=lazy_format(attribute), style={"text-align": TextAlign.LEFT})
                for attribute in self.subject.ownedAttribute
                if predicate(attribute)
            ),
            style={"padding": (4, 4, 4, 4), "min-height": 8},
            draw=draw_top_separator,
        )
| lgpl-2.1 |
aron-bordin/kivy | examples/widgets/textinput.py | 81 | 1860 | '''
Textinput tests
===============
This test is used to demonstrate virtual keyboard according to current
configuration.
Run this test as::
# use dock virtual keyboard (one instance)
python textinput.py -c kivy:keyboard_mode:dock
# use multi users virtual keyboard (multiples instance)
python textinput.py -c kivy:keyboard_mode:multi
# use system keyboard (one instance)
python textinput.py -c kivy:keyboard_mode:system
# use automatic detection from current platform
python textinput.py -c kivy:keyboard_mode:
'''
import kivy
kivy.require('1.0.8')
from kivy.core.window import Window
from kivy.uix.textinput import TextInput
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scatter import Scatter
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.config import Config
from kivy.base import runTouchApp
if __name__ == '__main__':
    root = FloatLayout()
    # create a button to release everything
    def release_all_keyboard(*l):
        Window.release_all_keyboards()
    btn = Button(text='Release\nall\nkeyboards', size_hint=(None, None),
                 halign='center')
    btn.bind(on_release=release_all_keyboard)
    root.add_widget(btn)
    # show current configuration
    lbl = 'Configuration keyboard_mode is %r, keyboard_layout is %r' % (
        Config.get('kivy', 'keyboard_mode'),
        Config.get('kivy', 'keyboard_layout'))
    label = Label(text=lbl, size_hint_y=None, height=50, pos_hint={'top': 1})
    root.add_widget(label)
    # Two draggable Scatter widgets, each hosting a TextInput; the second is
    # rotated to exercise keyboard positioning on transformed widgets.
    s = Scatter(size_hint=(None, None), pos=(300, 300))
    s.add_widget(TextInput(size_hint=(None, None), size=(100, 50)))
    root.add_widget(s)
    s = Scatter(size_hint=(None, None), pos=(400, 300), rotation=45)
    s.add_widget(TextInput(size_hint=(None, None), size=(100, 50)))
    root.add_widget(s)
    runTouchApp(root)
| mit |
cathywu/flow | flow/scenarios/loop_merge.py | 1 | 8343 | """Contains the loop merge scenario class."""
from flow.scenarios.base_scenario import Scenario
from flow.core.params import InitialConfig
from flow.core.params import TrafficLightParams
from numpy import pi, sin, cos, linspace
ADDITIONAL_NET_PARAMS = {
# radius of the loops
"ring_radius": 50,
# length of the straight edges connected the outer loop to the inner loop
"lane_length": 75,
# number of lanes in the inner loop
"inner_lanes": 3,
# number of lanes in the outer loop
"outer_lanes": 2,
# max speed limit in the network
"speed_limit": 30,
# resolution of the curved portions
"resolution": 40,
}
class TwoLoopsOneMergingScenario(Scenario):
    """Two loop merge scenario.

    This network is expected to simulate a closed loop representation of a
    merge. It consists of two rings that merge together for half the length of
    the smaller ring.

    Requires from net_params:

    * **ring_radius** : radius of the loops
    * **lane_length** : length of the straight edges connected the outer loop
      to the inner loop
    * **inner_lanes** : number of lanes in the inner loop
    * **outer_lanes** : number of lanes in the outer loop
    * **speed_limit** : max speed limit in the network
    * **resolution** : resolution of the curved portions

    Usage
    -----
    >>> from flow.core.params import NetParams
    >>> from flow.core.params import VehicleParams
    >>> from flow.core.params import InitialConfig
    >>> from flow.scenarios import TwoLoopsOneMergingScenario
    >>>
    >>> scenario = TwoLoopsOneMergingScenario(
    >>>     name='two_loops_merge',
    >>>     vehicles=VehicleParams(),
    >>>     net_params=NetParams(
    >>>         additional_params={
    >>>             'ring_radius': 50,
    >>>             'lane_length': 75,
    >>>             'inner_lanes': 3,
    >>>             'outer_lanes': 2,
    >>>             'speed_limit': 30,
    >>>             'resolution': 40
    >>>         },
    >>>     )
    >>> )
    """

    def __init__(self,
                 name,
                 vehicles,
                 net_params,
                 initial_config=InitialConfig(),
                 traffic_lights=TrafficLightParams()):
        """Initialize a two loop scenario.

        Raises
        ------
        KeyError
            if any entry of ADDITIONAL_NET_PARAMS is missing from
            ``net_params.additional_params``
        """
        for p in ADDITIONAL_NET_PARAMS.keys():
            if p not in net_params.additional_params:
                raise KeyError('Network parameter "{}" not supplied'.format(p))

        radius = net_params.additional_params["ring_radius"]
        x = net_params.additional_params["lane_length"]

        self.inner_lanes = net_params.additional_params["inner_lanes"]
        self.outer_lanes = net_params.additional_params["outer_lanes"]

        # fixed geometry of the junctions connecting the two rings
        self.junction_length = 0.3
        self.intersection_length = 25.5  # calibrate when the radius changes

        # total network length: both straight lanes, the full inner ring,
        # plus the intersection and junction segments
        net_params.additional_params["length"] = \
            2 * x + 2 * pi * radius + \
            2 * self.intersection_length + 2 * self.junction_length

        # split the fleet into merging ("merge"-typed) and non-merging cars
        num_vehicles = vehicles.num_vehicles
        num_merge_vehicles = sum("merge" in vehicles.get_type(veh_id)
                                 for veh_id in vehicles.ids)
        self.n_inner_vehicles = num_merge_vehicles
        self.n_outer_vehicles = num_vehicles - num_merge_vehicles

        length_loop = 2 * pi * radius
        self.length_loop = length_loop

        super().__init__(name, vehicles, net_params, initial_config,
                         traffic_lights)

    def specify_nodes(self, net_params):
        """See parent class."""
        r = net_params.additional_params["ring_radius"]
        x = net_params.additional_params["lane_length"]

        nodes = [{
            "id": "top_left",
            "x": 0,
            "y": r,
            "type": "priority"
        }, {
            "id": "bottom_left",
            "x": 0,
            "y": -r,
            "type": "priority"
        }, {
            "id": "top_right",
            "x": x,
            "y": r,
            "type": "priority"
        }, {
            "id": "bottom_right",
            "x": x,
            "y": -r,
            "type": "priority"
        }]

        return nodes

    def specify_edges(self, net_params):
        """See parent class."""
        r = net_params.additional_params["ring_radius"]
        x = net_params.additional_params["lane_length"]
        ring_edgelen = pi * r
        # Honor the user-supplied curve resolution; this parameter was
        # previously ignored in favor of a hard-coded 40.
        resolution = net_params.additional_params["resolution"]

        edges = [{
            "id": "center",
            "from": "bottom_left",
            "to": "top_left",
            "type": "edgeType",
            "length": ring_edgelen,
            "priority": 46,
            "shape": [(r * cos(t), r * sin(t))
                      for t in linspace(-pi / 2, pi / 2, resolution)],
            "numLanes": self.inner_lanes
        }, {
            "id": "top",
            "from": "top_right",
            "to": "top_left",
            "type": "edgeType",
            "length": x,
            "priority": 46,
            "numLanes": self.outer_lanes
        }, {
            "id": "bottom",
            "from": "bottom_left",
            "to": "bottom_right",
            "type": "edgeType",
            "length": x,
            "numLanes": self.outer_lanes
        }, {
            "id": "left",
            "from": "top_left",
            "to": "bottom_left",
            "type": "edgeType",
            "length": ring_edgelen,
            "shape": [(r * cos(t), r * sin(t))
                      for t in linspace(pi / 2, 3 * pi / 2, resolution)],
            "numLanes": self.inner_lanes
        }, {
            "id": "right",
            "from": "bottom_right",
            "to": "top_right",
            "type": "edgeType",
            "length": ring_edgelen,
            "shape": [(x + r * cos(t), r * sin(t))
                      for t in linspace(-pi / 2, pi / 2, resolution)],
            "numLanes": self.outer_lanes
        }]

        return edges

    def specify_types(self, net_params):
        """See parent class."""
        speed_limit = net_params.additional_params["speed_limit"]
        types = [{"id": "edgeType", "speed": speed_limit}]
        return types

    def specify_routes(self, net_params):
        """See parent class."""
        rts = {
            "top": ["top", "left", "bottom", "right", "top"],
            "bottom": ["bottom", "right", "top", "left", "bottom"],
            "right": ["right", "top", "left", "bottom"],
            "left": ["left", "center", "left"],
            "center": ["center", "left", "center"]
        }

        return rts

    def specify_edge_starts(self):
        """See parent class."""
        r = self.net_params.additional_params["ring_radius"]
        lane_length = self.net_params.additional_params["lane_length"]

        ring_edgelen = pi * r

        # cumulative positions of each edge along the network, accounting for
        # the intersection/junction segments between them
        edgestarts = [
            ("left", self.intersection_length),
            ("center", ring_edgelen + 2 * self.intersection_length),
            ("bottom", 2 * ring_edgelen + 2 * self.intersection_length),
            ("right", 2 * ring_edgelen + lane_length +
             2 * self.intersection_length + self.junction_length),
            ("top", 3 * ring_edgelen + lane_length +
             2 * self.intersection_length + 2 * self.junction_length)
        ]

        return edgestarts

    def specify_internal_edge_starts(self):
        """See parent class."""
        r = self.net_params.additional_params["ring_radius"]
        lane_length = self.net_params.additional_params["lane_length"]

        ring_edgelen = pi * r

        # positions of the internal (junction) edges generated by SUMO
        internal_edgestarts = [
            (":top_left", 0),
            (":bottom_left", ring_edgelen + self.intersection_length),
            (":bottom_right",
             2 * ring_edgelen + lane_length + 2 * self.intersection_length),
            (":top_right", 3 * ring_edgelen + lane_length +
             2 * self.intersection_length + self.junction_length)
        ]

        return internal_edgestarts
| mit |
h2oai/h2o-dev | h2o-py/tests/testdir_hdfs/pyunit_INTERNAL_HDFS_airlines_glrm_profile.py | 4 | 3636 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
import time
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
#----------------------------------------------------------------------
# Purpose: This test is to run GLRM on airline data and measure
# how fast it can run with the various optimization methods that we
# are looking at.
#----------------------------------------------------------------------
def hdfs_orc_parser():
    """Benchmark GLRM on the 116M-row airlines CSV dataset stored on HDFS.

    Imports the dataset from the Hadoop cluster, trains a GLRM model
    ``num_runs`` times with different seeds, and prints per-run wall-clock
    time, iteration counts and final objective values.

    Raises:
        EnvironmentError: if the Hadoop namenode is not reachable (i.e. the
            test is not running inside the H2O internal network).
    """
    # Check if we are running inside the H2O network by seeing if we can touch
    # the namenode.
    if not pyunit_utils.hadoop_namenode_is_accessible():
        raise EnvironmentError(
            "Hadoop namenode is not accessible; this test must run inside "
            "the H2O internal network.")

    hdfs_name_node = pyunit_utils.hadoop_namenode()
    hdfs_csv_file = "/datasets/air_csv_part"

    col_types = ['real', 'real', 'real', 'real', 'real', 'real', 'real', 'real', 'enum', 'real', 'enum', 'real',
                 'real', 'enum', 'real', 'real', 'enum', 'enum', 'real', 'enum', 'enum', 'real', 'real', 'real',
                 'enum', 'enum', 'enum', 'enum', 'enum', 'enum', 'enum']

    # import CSV file
    print("Import airlines 116M dataset in original csv format from HDFS")
    url_csv = "hdfs://{0}{1}".format(hdfs_name_node, hdfs_csv_file)
    acs_orig = h2o.import_file(url_csv, na_strings=['\\N'], col_types=col_types)

    print("Data size number of rows: {0}, number of columns: {1}".format(acs_orig.nrow, acs_orig.ncol))

    # fixed seed pool so repeated runs are reproducible
    seeds = [2297378124, 3849570216, 6733652048, 8915337442, 8344418400, 9416580152, 2598632624, 4977008454, 8273228579,
             8185554539, 3219125000, 2998879373, 7707012513, 5786923379, 5029788935, 935945790, 7092607078, 9305834745,
             6173975590, 5397294255]

    run_time_ms = []
    iterations = []
    objective = []
    num_runs = 1  # number of times to repeat experiments

    for ind in range(num_runs):
        acs_model = H2OGeneralizedLowRankEstimator(k=10,
                                                   transform='STANDARDIZE',
                                                   loss='Quadratic',
                                                   multi_loss="Categorical",
                                                   model_id="clients_core_glrm",
                                                   regularization_x="L2",
                                                   regularization_y="L1",
                                                   gamma_x=0.2,
                                                   gamma_y=0.5,
                                                   init="SVD",
                                                   max_iterations=200,
                                                   seed=seeds[ind % len(seeds)])
        acs_model.train(x=acs_orig.names, training_frame=acs_orig, seed=seeds[ind % len(seeds)])

        # collect per-run performance metrics from the model JSON
        run_time_ms.append(acs_model._model_json['output']['end_time'] - acs_model._model_json['output']['start_time'])
        iterations.append(acs_model._model_json['output']['iterations'])
        objective.append(acs_model._model_json['output']['objective'])

    print("Run time in ms: {0}".format(run_time_ms))
    print("number of iterations: {0}".format(iterations))
    print("objective function value: {0}".format(objective))
    sys.stdout.flush()
if __name__ == "__main__":
    # Executed directly: run under the pyunit harness (sets up the H2O cloud).
    pyunit_utils.standalone_test(hdfs_orc_parser)
else:
    # Imported by the test runner: execute immediately.
    hdfs_orc_parser()
jtux270/translate | FreeIPA/freeipa-3.0.0/ipalib/plugins/misc.py | 2 | 4028 | # Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from ipalib import api, LocalOrRemote, _, ngettext
from ipalib.output import Output, summary
from ipalib import Flag
__doc__ = _("""
Misc plug-ins
""")
# FIXME: We should not let env return anything in_server
# when mode == 'production'. This would allow an attacker to see the
# configuration of the server, potentially revealing compromising
# information. However, it's damn handy for testing/debugging.
class env(LocalOrRemote):
    # IPA plugin command: dump the (matching) environment variables of either
    # the local client or the remote server, depending on LocalOrRemote flags.
    __doc__ = _('Show environment variables.')

    msg_summary = _('%(count)d variables')

    # optional positional glob patterns selecting which variables to show
    takes_args = (
        'variables*',
    )

    takes_options = LocalOrRemote.takes_options + (
        Flag('all',
             cli_name='all',
             doc=_('retrieve and print all attributes from the server. Affects command output.'),
             exclude='webui',
             flags=['no_output'],
             default=True,
        ),
    )

    has_output = (
        Output('result',
               type=dict,
               doc=_('Dictionary mapping variable name to value'),
        ),
        Output('total',
               type=int,
               doc=_('Total number of variables env (>= count)'),
               flags=['no_display'],
        ),
        Output('count',
               type=int,
               doc=_('Number of variables returned (<= total)'),
               flags=['no_display'],
        ),
        summary,
    )

    def __find_keys(self, variables):
        """Return the set of env keys matched by the given names/globs.

        ``*`` in a query is treated as a wildcard (translated to regex
        ``.*``); exact names are kept only if they exist in ``self.env``.
        """
        keys = set()
        for query in variables:
            if '*' in query:
                pat = re.compile(query.replace('*', '.*') + '$')
                for key in self.env:
                    if pat.match(key):
                        keys.add(key)
            elif query in self.env:
                keys.add(query)
        return keys

    def execute(self, variables, **options):
        """Build the result dict; with no patterns, return every variable."""
        if variables is None:
            keys = self.env
        else:
            keys = self.__find_keys(variables)
        ret = dict(
            result=dict(
                (key, self.env[key]) for key in keys
            ),
            count=len(keys),
            total=len(self.env),
        )
        # summary is only meaningful when more than one variable is shown
        if len(keys) > 1:
            ret['summary'] = self.msg_summary % ret
        else:
            ret['summary'] = None
        return ret

api.register(env)
class plugins(LocalOrRemote):
    # IPA plugin command: list every loaded plugin and the base classes it
    # is registered under.
    __doc__ = _('Show all loaded plugins.')

    msg_summary = ngettext(
        '%(count)d plugin loaded', '%(count)d plugins loaded', 0
    )

    takes_options = LocalOrRemote.takes_options + (
        Flag('all',
             cli_name='all',
             doc=_('retrieve and print all attributes from the server. Affects command output.'),
             exclude='webui',
             flags=['no_output'],
             default=True,
        ),
    )

    has_output = (
        Output('result', dict, 'Dictionary mapping plugin names to bases'),
        Output('count',
               type=int,
               doc=_('Number of plugins loaded'),
        ),
        summary,
    )

    def execute(self, **options):
        """Return {plugin_name: bases} for all registered plugins, sorted."""
        plugins = sorted(self.api.plugins, key=lambda o: o.plugin)
        return dict(
            result=dict(
                (p.plugin, p.bases) for p in plugins
            ),
            count=len(plugins),
        )

api.register(plugins)
| gpl-3.0 |
317070/kaggle-heart | configurations/je_ss_lincrps_nrmsc_dropoutput.py | 1 | 8363 | """Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 11
num_epochs_train = 50 * AV_SLICE_PER_PAT
# - learning rate and method
base_lr = .00003
learning_rate_schedule = {
0: base_lr,
4*num_epochs_train/5: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-16, 16),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
preprocess_train = preprocess.preprocess_normscale
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 128
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
    """Build the training objective for this configuration.

    Combines the Kaggle CRPS objective on the network outputs with a
    per-layer weighted L2 penalty on the layers listed under
    ``interface_layers["regularizable"]``.
    """
    regularizable = interface_layers["regularizable"]
    penalty = nn.regularization.regularize_layer_params_weighted(
        regularizable, nn.regularization.l2)
    return objectives.KaggleObjective(interface_layers["outputs"],
                                      penalty=penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 * AV_SLICE_PER_PAT # More augmentations since a we only use single slices
# Architecture
poselu = lambda x: T.switch(x > 0, x + 1, T.exp(x))
def build_model():
    """Build the VGG-style single-slice network with two output heads.

    Returns a dict describing the model interface: input layers, the two
    output layers ("systole" and "diastole"), and the dense layers subject
    to L2 regularization (with their weights).
    """
    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice"]

    l0 = nn.layers.InputLayer(input_size)

    # VGG-like convolutional trunk: blocks of 3x3 convs followed by 2x2
    # max-pooling, doubling the filter count up to 512.
    l1a = nn.layers.dnn.Conv2DDNNLayer(l0,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))

    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))

    l5a = nn.layers.dnn.Conv2DDNNLayer(l4,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))

    # Systole Dense layers: 600-way unnormalized output, normalised then
    # cumulatively summed to produce a monotone CDF over volumes.
    ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)

    ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
    ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
    ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=None)
    ldsys3drop = nn.layers.dropout(ldsys3, p=0.5)  # dropout at the output might encourage adjacent neurons to correllate
    ldsys3norm = layers.NormalisationLayer(ldsys3drop, allow_negative=True)
    l_systole = layers.CumSumLayer(ldsys3norm)

    # Diastole Dense layers: identical head architecture with separate weights.
    lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)

    lddia1drop = nn.layers.dropout(lddia1, p=0.5)
    lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    lddia2drop = nn.layers.dropout(lddia2, p=0.5)
    lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=None)
    lddia3drop = nn.layers.dropout(lddia3, p=0.5)  # dropout at the output might encourage adjacent neurons to correllate
    lddia3norm = layers.NormalisationLayer(lddia3drop, allow_negative=True)
    l_diastole = layers.CumSumLayer(lddia3norm)

    return {
        "inputs":{
            "sliced:data:singleslice": l0
        },

        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },

        # per-layer L2 penalty weights consumed by build_objective
        "regularizable": {
            ldsys1: l2_weight,
            ldsys2: l2_weight,
            ldsys3: l2_weight_out,
            lddia1: l2_weight,
            lddia2: l2_weight,
            lddia3: l2_weight_out,
        },
    }
| mit |
oshoemaker/xcleague | public/assets/plugins/vector-map/converter/simplifier.py | 234 | 5985 | import argparse
import sys
import os
from osgeo import ogr
from osgeo import osr
import anyjson
import shapely.geometry
import shapely.ops
import codecs
import time
# Coordinate hash key format (8 decimal places) and simplification tolerance.
format = '%.8f %.8f'
tolerance = 0.01

infile = '/Users/kirilllebedev/Maps/50m-admin-0-countries/ne_50m_admin_0_countries.shp'
outfile = 'map.shp'

# Open the datasource to operate on.
in_ds = ogr.Open( infile, update = 0 )

in_layer = in_ds.GetLayer( 0 )

in_defn = in_layer.GetLayerDefn()

# Create output file with similar information.
shp_driver = ogr.GetDriverByName( 'ESRI Shapefile' )
if os.path.exists('map.shp'):
    shp_driver.DeleteDataSource( outfile )

shp_ds = shp_driver.CreateDataSource( outfile )

shp_layer = shp_ds.CreateLayer( in_defn.GetName(),
                                geom_type = in_defn.GetGeomType(),
                                srs = in_layer.GetSpatialRef() )

# Copy the attribute schema (field name/type/width/precision) to the output.
in_field_count = in_defn.GetFieldCount()

for fld_index in range(in_field_count):
    src_fd = in_defn.GetFieldDefn( fld_index )

    fd = ogr.FieldDefn( src_fd.GetName(), src_fd.GetType() )
    fd.SetWidth( src_fd.GetWidth() )
    fd.SetPrecision( src_fd.GetPrecision() )
    shp_layer.CreateField( fd )

# Load geometries: keep only (multi)polygons, converted to shapely objects.
geometries = []
for feature in in_layer:
    geometry = feature.GetGeometryRef()
    geometryType = geometry.GetGeometryType()
    if geometryType == ogr.wkbPolygon or geometryType == ogr.wkbMultiPolygon:
        shapelyGeometry = shapely.wkb.loads( geometry.ExportToWkb() )
        #if not shapelyGeometry.is_valid:
        #buffer to fix selfcrosses
        #shapelyGeometry = shapelyGeometry.buffer(0)
        if shapelyGeometry:
            geometries.append(shapelyGeometry)

in_layer.ResetReading()
start = int(round(time.time() * 1000))

# Simplification
# Build an adjacency map keyed by formatted coordinate strings: for every
# consecutive pair of ring vertices, record the connection in both
# directions.  Points with more than two neighbors are shared borders and
# later act as pivots so adjacent polygons stay topologically consistent.
points = []
connections = {}
counter = 0
for geom in geometries:
    counter += 1
    polygons = []
    if isinstance(geom, shapely.geometry.Polygon):
        polygons.append(geom)
    else:
        for polygon in geom:
            polygons.append(polygon)
    for polygon in polygons:
        if polygon.area > 0:
            lines = []
            lines.append(polygon.exterior)
            for line in polygon.interiors:
                lines.append(line)
            for line in lines:
                for i in range(len(line.coords)-1):
                    indexFrom = i
                    indexTo = i+1
                    pointFrom = format % line.coords[indexFrom]
                    pointTo = format % line.coords[indexTo]
                    if pointFrom == pointTo:
                        # skip degenerate zero-length segments
                        continue
                    if not (pointFrom in connections):
                        connections[pointFrom] = {}
                    connections[pointFrom][pointTo] = 1
                    if not (pointTo in connections):
                        connections[pointTo] = {}
                    connections[pointTo][pointFrom] = 1
print int(round(time.time() * 1000)) - start

# Caches shared across simplifyRing calls: already-simplified shared border
# segments, and the pivot points that delimit them.
simplifiedLines = {}
pivotPoints = {}
def simplifyRing(ring):
    """Simplify one polygon ring, reusing previously simplified shared borders.

    Mutates the module-level ``simplifiedLines`` / ``pivotPoints`` caches so
    that a border shared between two polygons is simplified only once and
    both neighbors use the same simplified coordinates.

    Returns a shapely LineString, or None when the simplified ring collapses
    to <= 2 points.
    """
    coords = list(ring.coords)[0:-1]
    simpleCoords = []

    # Find the first pivot point on the ring (a vertex with >2 connections,
    # i.e. shared by more than two segments, or one already marked).
    isPivot = False
    pointIndex = 0
    while not isPivot and pointIndex < len(coords):
        pointStr = format % coords[pointIndex]
        pointIndex += 1
        isPivot = ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints))
    pointIndex = pointIndex - 1

    if not isPivot:
        # No shared borders: simplify the whole ring in one piece and cache it.
        simpleRing = shapely.geometry.LineString(coords).simplify(tolerance)
        if len(simpleRing.coords) <= 2:
            return None
        else:
            pivotPoints[format % coords[0]] = True
            pivotPoints[format % coords[-1]] = True
            simpleLineKey = format % coords[0]+':'+format % coords[1]+':'+format % coords[-1]
            simplifiedLines[simpleLineKey] = simpleRing.coords
            return simpleRing
    else:
        # Rotate the ring so it starts at the first pivot, then simplify each
        # pivot-to-pivot stretch independently.
        points = coords[pointIndex:len(coords)]
        points.extend(coords[0:pointIndex+1])
        iFrom = 0
        for i in range(1, len(points)):
            pointStr = format % points[i]
            if ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints)):
                line = points[iFrom:i+1]
                # Look the segment up by its reversed key first: the neighbor
                # polygon traverses the shared border in the opposite direction.
                lineKey = format % line[-1]+':'+format % line[-2]+':'+format % line[0]
                if lineKey in simplifiedLines:
                    simpleLine = simplifiedLines[lineKey]
                    simpleLine = list(reversed(simpleLine))
                else:
                    simpleLine = shapely.geometry.LineString(line).simplify(tolerance).coords
                    lineKey = format % line[0]+':'+format % line[1]+':'+format % line[-1]
                    simplifiedLines[lineKey] = simpleLine
                simpleCoords.extend( simpleLine[0:-1] )
                iFrom = i
        if len(simpleCoords) <= 2:
            return None
        else:
            return shapely.geometry.LineString(simpleCoords)
def simplifyPolygon(polygon):
    """Simplify a polygon's exterior and interior rings via simplifyRing.

    Returns None when the exterior collapses; interior rings that collapse
    are silently dropped.
    """
    exterior = simplifyRing(polygon.exterior)
    if exterior is None:
        return None
    candidates = (simplifyRing(ring) for ring in polygon.interiors)
    interiors = [ring for ring in candidates if ring is not None]
    return shapely.geometry.Polygon(exterior, interiors)
# Simplify every geometry; entries that collapse entirely become None so the
# results list stays index-aligned with the input features.
results = []
for geom in geometries:
    polygons = []
    simplePolygons = []
    if isinstance(geom, shapely.geometry.Polygon):
        polygons.append(geom)
    else:
        for polygon in geom:
            polygons.append(polygon)
    for polygon in polygons:
        simplePolygon = simplifyPolygon(polygon)
        # also drop polygons whose underlying GEOS geometry is empty
        if not (simplePolygon is None or simplePolygon._geom is None):
            simplePolygons.append(simplePolygon)
    if len(simplePolygons) > 0:
        results.append(shapely.geometry.MultiPolygon(simplePolygons))
    else:
        results.append(None)

# Process all features in input layer.
in_feat = in_layer.GetNextFeature()
counter = 0
while in_feat is not None:
    if results[counter] is not None:
        # copy attributes from the source feature, then attach the
        # simplified geometry (ownership transferred via SetGeometryDirectly)
        out_feat = ogr.Feature( feature_def = shp_layer.GetLayerDefn() )
        out_feat.SetFrom( in_feat )
        out_feat.SetGeometryDirectly(
            ogr.CreateGeometryFromWkb(
                shapely.wkb.dumps(
                    results[counter]
                )
            )
        )
        shp_layer.CreateFeature( out_feat )
        out_feat.Destroy()
    else:
        # NOTE(review): field 16 presumably holds the feature name — verify
        # against the shapefile schema
        print 'geometry is too small: '+in_feat.GetField(16)
    in_feat.Destroy()
    in_feat = in_layer.GetNextFeature()
    counter += 1

# Cleanup
shp_ds.Destroy()
in_ds.Destroy()
print int(round(time.time() * 1000)) - start
ntt-sic/heat | heat/tests/test_sqlalchemy_types.py | 1 | 1833 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from heat.db.sqlalchemy.types import LongText
from heat.db.sqlalchemy.types import Json
from sqlalchemy import types
from sqlalchemy.dialects.mysql.base import MySQLDialect
from sqlalchemy.dialects.sqlite.base import SQLiteDialect
class LongTextTest(testtools.TestCase):
    """Verify LongText picks a dialect-specific implementation per backend."""

    def setUp(self):
        super(LongTextTest, self).setUp()
        self.sqltype = LongText()

    def test_load_dialect_impl(self):
        # MySQL must not fall back to the generic Text type.
        mysql_impl = self.sqltype.load_dialect_impl(MySQLDialect())
        self.assertNotEqual(types.Text, type(mysql_impl))
        # SQLite uses the plain Text implementation.
        sqlite_impl = self.sqltype.load_dialect_impl(SQLiteDialect())
        self.assertEqual(types.Text, type(sqlite_impl))
class JsonTest(testtools.TestCase):
    """Verify the Json column type round-trips values through JSON text."""

    def setUp(self):
        super(JsonTest, self).setUp()
        self.sqltype = Json()

    def test_process_bind_param(self):
        # Binding serializes the dict to a JSON string.
        serialized = self.sqltype.process_bind_param({'foo': 'bar'}, None)
        self.assertEqual('{"foo": "bar"}', serialized)

    def test_process_result_value(self):
        # Reading deserializes the JSON string back into a dict.
        deserialized = self.sqltype.process_result_value('{"foo": "bar"}', None)
        self.assertEqual({'foo': 'bar'}, deserialized)
| apache-2.0 |
ingmarschuster/ModelSelection | modsel/scribblings/eval_is_si.py | 1 | 4431 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 6 09:47:47 2015
@author: Ingmar Schuster
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy as sp
import scipy.stats as stats
from numpy import exp, log, sqrt
from scipy.misc import logsumexp
from numpy.linalg import inv
import modsel.estimator_statistics as es
import cPickle as pick
from copy import copy
import matplotlib as mpl
from modsel.evidence import evidence_from_importance_weights
import matplotlib.pyplot as plt
def plot_var_bias_mse(res, outfname = "plot.pdf", ylabel_pre = ""):
    """Plot each estimator statistic vs. log sample size and save to file.

    ``res`` maps sample size -> {statistic name -> {estimator -> value}}.
    Statistic names ending in "(relat)" are relative versions; they are
    currently skipped (the ``continue`` below makes the st_rel branch dead,
    matching the commented-out ``st.extend(st_rel)`` further down).
    """
    ssize = sorted(res.keys())
    st = res[ssize[0]].keys()
    st_abs = []
    st_rel = []
    for s in st:
        if s.endswith("(relat)"):
            continue
            # unreachable: relative statistics are deliberately disabled
            st_rel.append(s)
        else:
            st_abs.append(s)
    st_abs.sort()
    st_rel.sort()
    st = copy(st_abs)
    #st.extend(st_rel)
    estimators = res[ssize[0]][st[0]].keys()
    fig, axes = plt.subplots(ncols=max(len(st_abs), len(st_rel)), nrows = 1, figsize=(9,3))
    for i in range(len(st)):
        m = st[i]
        a = axes.flat[i]
        for e in estimators:
            x = np.log(sorted(res.keys()))
            # NOTE(review): the comprehension variable i shadows the outer
            # loop index; it iterates over ssize here, which looks intended.
            y = np.array([res[i][m][e] for i in ssize]).flatten()
            #assert()
            a.plot(x, y, label=e)
            a.set_title("")
            a.set_xlabel("log # lhood evals")
            a.set_ylabel(ylabel_pre+m)
            a.autoscale("both")
            a.set_aspect("auto", adjustable="datalim")
    # place the legend outside the last absolute-statistic axis
    lgd = axes[len(st_abs)-1].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    #fig.suptitle(title + "; dim=" + str(dims))
    # fig.figure(num=1, figsize=(1,3))
    fig.tight_layout()
    fig.savefig(outfname, bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)
# {"post":post_param, "prop":prop_param,
# "perm_x":perm_x,
# "log_importance_weights":perm_weights,
# "M": M, "K":K,
# "log_evid":log_evid }
def estim_stats_progression(samp, log_weights, true_exp, true_ev = None, norm_w = True, steps = 10):
    """Compute estimator values at log-spaced subsample sizes.

    ``samp`` / ``log_weights`` are lists of per-run arrays; index 0 is the
    "Standard" run while the stacked runs form the "Inflation" estimate.
    Returns (expectation estimates, evidence log-weight slices), both keyed
    by the Standard-run subsample size.
    """
    # log-spaced cut points for the standard run and the stacked (inflated) run
    idx_norm = np.round(np.logspace(1, np.log10(samp[0].shape[0]), steps, base=10)).astype(int)
    idx_infl = np.round(np.logspace(1, np.log10(np.vstack(samp).shape[0]), steps, base=10)).astype(int)
    est = {}
    ev_est = {}
    for i in range(steps):
        est[idx_norm[i]] = {"Standard":estim(samp[0][:idx_norm[i]], log_weights[0][:idx_norm[i]], norm_w = norm_w),
                            "Inflation": estim(np.vstack(samp)[:idx_infl[i]], np.hstack(log_weights)[:idx_infl[i]], norm_w = norm_w),
                            "GroundTruth": true_exp}
        ev_est[idx_norm[i]] = {"Standard": np.atleast_2d(log_weights[0][:idx_norm[i]]).T,
                               "Inflation": np.atleast_2d(np.hstack(log_weights)[:idx_infl[i]]).T,
                               "GroundTruth": true_ev}
    return (est, ev_est)
def construct_long_run(samp, log_weights):
    """Concatenate per-run samples and log-weights into one long run.

    The original body was an unfinished stub (the loop evaluated a bare
    expression and nothing was returned); this implements the behavior the
    name and callers of the per-run data imply.

    Parameters
    ----------
    samp : list of 2-d arrays, one per run (rows are samples)
    log_weights : list of 1-d arrays of matching per-run log weights

    Returns
    -------
    (samples, log_weights) : the runs stacked row-wise / concatenated
    """
    long_samp = np.vstack(samp)
    long_lw = np.hstack(log_weights)
    return (long_samp, long_lw)
def estim(samp, log_weights, norm_w = True):
    """Weighted-mean estimate of the samples using log-domain arithmetic.

    When ``norm_w`` is True the log weights are first self-normalized
    (subtracting their logsumexp).  The sample/weight combination is done
    through the project's signed-log helpers; presumably ``es.logaddexp``
    here combines log-magnitudes into the log-domain product of sample and
    weight — verify against modsel.estimator_statistics.
    """
    if norm_w is True:
        log_weights = log_weights - logsumexp(log_weights)
    (lsamp, lsamp_sign) = es.log_sign(samp)
    (lws, lws_sign) = es.logaddexp(lsamp, np.atleast_2d(log_weights).T, lsamp_sign)
    # back to linear domain, then average over the sample axis
    return es.exp_sign(lws, lws_sign).mean(0)
# {"post":post_param, "prop":prop_param,
# "perm_x":perm_x,
# "log_importance_weights":perm_weights,
# "M": M, "K":K,
# "log_evid":log_evid }
def plot(fname, num_runs = 100):
    """Load a pickled experiment dump and plot estimator / evidence stats.

    ``fname`` is a pickle containing per-run samples, importance weights and
    ground-truth values; the first ``num_runs`` runs are stacked into one
    long "inflated" run.  Writes two PDFs next to the input file.
    """
    with open(fname, "r") as f:
        res = pick.load(f)
    perm_x = np.hstack(res["perm_x"][:num_runs]) # stack up to long run
    liw = np.hstack(res["log_importance_weights"][:num_runs]) # stack up to long run
    std_ss = perm_x[0].shape[0]
    infl_ss = len(perm_x)*perm_x[0].shape[0]
    print("Standard IS:", std_ss, "samples, Inflated:", infl_ss)
    # encode the experiment configuration into the output file names
    added = "__is_"+str(std_ss)+"_-_issi_"+str(infl_ss)+"_post"+str(res["post"][0])+"_prop"+str(res["prop"][0])+"_M"+str(res["M"])+"_K"+str(res["K"])+"_logevid"+str(res["log_evid"])
    print(fname+added)
    #return
    (s, ev_s) = estim_stats_progression(perm_x, liw, res["post"][0], np.atleast_2d(res["log_evid"]))
    s_stat = es.statistics(s)
    ev_stat = es.logstatistics(ev_s)
    #assert()
    plot_var_bias_mse(s_stat, outfname = fname+added+".pdf")
    plot_var_bias_mse(ev_stat, outfname = fname+added+"_evidence.pdf", ylabel_pre="log ")
| gpl-3.0 |
gacarrillor/QGIS | python/plugins/db_manager/db_plugins/postgis/plugin_test.py | 30 | 5901 | # -*- coding: utf-8 -*-
"""
***************************************************************************
plugin_test.py
---------------------
Date : May 2017
Copyright : (C) 2017, Sandro Santilli
Email : strk at kbt dot io
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Sandro Santilli'
__date__ = 'May 2017'
__copyright__ = '(C) 2017, Sandro Santilli'
import os
import re
import qgis
from qgis.testing import start_app, unittest
from qgis.core import QgsDataSourceUri
from qgis.utils import iface
from qgis.PyQt.QtCore import QObject
start_app()
from db_manager.db_plugins.postgis.plugin import PostGisDBPlugin, PGRasterTable
from db_manager.db_plugins.postgis.plugin import PGDatabase
from db_manager.db_plugins.postgis.data_model import PGSqlResultModel
from db_manager.db_plugins.plugin import Table
from db_manager.db_plugins.postgis.connector import PostGisDBConnector
class TestDBManagerPostgisPlugin(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        """Point libpq at the test database through PGDATABASE and publish a
        temporary PGSERVICEFILE that defines a 'dbmanager' service."""
        cls.old_pgdatabase_env = os.environ.get('PGDATABASE')
        # QGIS_PGTEST_DB contains the full connection string and not only the DB name!
        QGIS_PGTEST_DB = os.environ.get('QGIS_PGTEST_DB')
        if QGIS_PGTEST_DB is not None:
            test_uri = QgsDataSourceUri(QGIS_PGTEST_DB)
            cls.testdb = test_uri.database()
        else:
            cls.testdb = 'qgis_test'
        os.environ['PGDATABASE'] = cls.testdb
        # Create temporary service file
        cls.old_pgservicefile_env = os.environ.get('PGSERVICEFILE')
        cls.tmpservicefile = '/tmp/qgis-test-{}-pg_service.conf'.format(os.getpid())
        os.environ['PGSERVICEFILE'] = cls.tmpservicefile
        # Context manager guarantees the handle is closed even if write() fails.
        with open(cls.tmpservicefile, "w") as f:
            f.write("[dbmanager]\ndbname={}\n".format(cls.testdb))
            # TODO: add more things if PGSERVICEFILE was already set ?

    @classmethod
    def tearDownClass(cls):
        # Restore previous env variables if needed.
        # BUGFIX: the original only restored the variables when they were
        # previously set, leaving the test values exported when they were not.
        # Drop them in that case so the environment is fully restored.
        if cls.old_pgdatabase_env is not None:
            os.environ['PGDATABASE'] = cls.old_pgdatabase_env
        else:
            os.environ.pop('PGDATABASE', None)
        if cls.old_pgservicefile_env is not None:
            os.environ['PGSERVICEFILE'] = cls.old_pgservicefile_env
        else:
            os.environ.pop('PGSERVICEFILE', None)
        # Remove temporary service file
        os.unlink(cls.tmpservicefile)

    # See https://github.com/qgis/QGIS/issues/24525
    def test_rasterTableURI(self):
        """Raster table URIs must carry the dbname resolved from the
        environment / service definition even when the source URI is empty."""

        def check_rasterTableURI(expected_dbname):
            # Every raster table advertised by the database must embed the
            # expected dbname in its data-source URI.
            tables = database.tables()
            raster_tables_count = 0
            for tab in tables:
                if tab.type == Table.RasterType:
                    raster_tables_count += 1
                    uri = tab.uri()
                    m = re.search(' dbname=\'([^ ]*)\' ', uri)
                    self.assertTrue(m)
                    actual_dbname = m.group(1)
                    self.assertEqual(actual_dbname, expected_dbname)
                # print(tab.type)
                # print(tab.quotedName())
                # print(tab)
            # We need to make sure a database is created with at
            # least one raster table !
            self.assertGreaterEqual(raster_tables_count, 1)

        obj = QObject()  # needs to be kept alive
        obj.connectionName = lambda: 'fake'
        obj.providerName = lambda: 'postgres'
        # Test for empty URI
        # See https://github.com/qgis/QGIS/issues/24525
        # and https://github.com/qgis/QGIS/issues/19005
        expected_dbname = self.testdb
        os.environ['PGDATABASE'] = expected_dbname
        database = PGDatabase(obj, QgsDataSourceUri())
        self.assertIsInstance(database, PGDatabase)
        uri = database.uri()
        self.assertEqual(uri.host(), '')
        self.assertEqual(uri.username(), '')
        self.assertEqual(uri.database(), expected_dbname)
        self.assertEqual(uri.service(), '')
        check_rasterTableURI(expected_dbname)
        # Test for service-only URI
        # See https://github.com/qgis/QGIS/issues/24526
        os.environ['PGDATABASE'] = 'fake'
        database = PGDatabase(obj, QgsDataSourceUri('service=dbmanager'))
        self.assertIsInstance(database, PGDatabase)
        uri = database.uri()
        self.assertEqual(uri.host(), '')
        self.assertEqual(uri.username(), '')
        self.assertEqual(uri.database(), '')
        self.assertEqual(uri.service(), 'dbmanager')
        check_rasterTableURI(expected_dbname)

    # See https://github.com/qgis/QGIS/issues/24732
    def test_unicodeInQuery(self):
        """Non-ASCII literals must round-trip through sqlResultModel for both
        byte-string and unicode SQL input."""
        os.environ['PGDATABASE'] = self.testdb
        obj = QObject()  # needs to be kept alive
        obj.connectionName = lambda: 'fake'
        obj.providerName = lambda: 'postgres'
        database = PGDatabase(obj, QgsDataSourceUri())
        self.assertIsInstance(database, PGDatabase)
        # SQL as string literal
        res = database.sqlResultModel("SELECT 'é'::text", obj)
        self.assertIsInstance(res, PGSqlResultModel)
        dat = res.getData(0, 0)
        self.assertEqual(dat, u"é")
        # SQL as unicode literal
        res = database.sqlResultModel(u"SELECT 'é'::text", obj)
        self.assertIsInstance(res, PGSqlResultModel)
        dat = res.getData(0, 0)
        self.assertEqual(dat, u"é")
# Allow running this module directly with the stock unittest runner
# (outside the QGIS test harness).
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
abdellatifkarroum/odoo | addons/account/account_invoice.py | 5 | 79092 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
from lxml import etree
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
from openerp.tools import float_compare
import openerp.addons.decimal_precision as dp
# mapping invoice type to journal type
TYPE2JOURNAL = {
    'out_invoice': 'sale',
    'in_invoice': 'purchase',
    'out_refund': 'sale_refund',
    'in_refund': 'purchase_refund',
}
# mapping invoice type to refund type
TYPE2REFUND = {
    'out_invoice': 'out_refund',        # Customer Invoice
    'in_invoice': 'in_refund',          # Supplier Invoice
    'out_refund': 'out_invoice',        # Customer Refund
    'in_refund': 'in_invoice',          # Supplier Refund
}
# ORM bookkeeping columns (presumably excluded when copying record data —
# confirm against the usages further down the file).
MAGIC_COLUMNS = ('id', 'create_uid', 'create_date', 'write_uid', 'write_date')
class account_invoice(models.Model):
_name = "account.invoice"
_inherit = ['mail.thread']
_description = "Invoice"
_order = "number desc, id desc"
_track = {
'type': {
},
'state': {
'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj.state == 'paid' and obj.type in ('out_invoice', 'out_refund'),
'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj.state == 'open' and obj.type in ('out_invoice', 'out_refund'),
},
}
@api.one
@api.depends('invoice_line.price_subtotal', 'tax_line.amount')
def _compute_amount(self):
self.amount_untaxed = sum(line.price_subtotal for line in self.invoice_line)
self.amount_tax = sum(line.amount for line in self.tax_line)
self.amount_total = self.amount_untaxed + self.amount_tax
@api.model
def _default_journal(self):
inv_type = self._context.get('type', 'out_invoice')
inv_types = inv_type if isinstance(inv_type, list) else [inv_type]
company_id = self._context.get('company_id', self.env.user.company_id.id)
domain = [
('type', 'in', filter(None, map(TYPE2JOURNAL.get, inv_types))),
('company_id', '=', company_id),
]
return self.env['account.journal'].search(domain, limit=1)
@api.model
def _default_currency(self):
journal = self._default_journal()
return journal.currency or journal.company_id.currency_id
@api.model
@api.returns('account.analytic.journal', lambda r: r.id)
def _get_journal_analytic(self, inv_type):
""" Return the analytic journal corresponding to the given invoice type. """
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale', 'in_refund': 'purchase'}
journal_type = type2journal.get(inv_type, 'sale')
journal = self.env['account.analytic.journal'].search([('type', '=', journal_type)], limit=1)
if not journal:
raise except_orm(_('No Analytic Journal!'),
_("You must define an analytic journal of type '%s'!") % (journal_type,))
return journal[0]
@api.one
@api.depends('account_id', 'move_id.line_id.account_id', 'move_id.line_id.reconcile_id')
def _compute_reconciled(self):
self.reconciled = self.test_paid()
@api.model
def _get_reference_type(self):
return [('none', _('Free Reference'))]
    @api.one
    @api.depends(
        'state', 'currency_id', 'invoice_line.price_subtotal',
        'move_id.line_id.account_id.type',
        'move_id.line_id.amount_residual',
        # Fixes the fact that move_id.line_id.amount_residual, being not stored and old API, doesn't trigger recomputation
        'move_id.line_id.reconcile_id',
        'move_id.line_id.amount_residual_currency',
        'move_id.line_id.currency_id',
        'move_id.line_id.reconcile_partial_id.line_partial_ids.invoice.type',
    )
    # An invoice's residual amount is the sum of its unreconciled move lines and,
    # for partially reconciled move lines, their residual amount divided by the
    # number of times this reconciliation is used in an invoice (so we split
    # the residual amount between all invoice)
    def _compute_residual(self):
        """Compute the remaining amount due, expressed in the invoice currency."""
        self.residual = 0.0
        # Each partial reconciliation is considered only once for each invoice it appears into,
        # and its residual amount is divided by this number of invoices
        partial_reconciliations_done = []
        for line in self.sudo().move_id.line_id:
            # Only receivable/payable lines carry the open balance.
            if line.account_id.type not in ('receivable', 'payable'):
                continue
            if line.reconcile_partial_id and line.reconcile_partial_id.id in partial_reconciliations_done:
                continue
            # Get the correct line residual amount
            if line.currency_id == self.currency_id:
                line_amount = line.currency_id and line.amount_residual_currency or line.amount_residual
            else:
                # Convert from the company currency at the line date.
                from_currency = line.company_id.currency_id.with_context(date=line.date)
                line_amount = from_currency.compute(line.amount_residual, self.currency_id)
            # For partially reconciled lines, split the residual amount
            if line.reconcile_partial_id:
                partial_reconciliation_invoices = set()
                for pline in line.reconcile_partial_id.line_partial_ids:
                    if pline.invoice and self.type == pline.invoice.type:
                        partial_reconciliation_invoices.update([pline.invoice.id])
                line_amount = self.currency_id.round(line_amount / len(partial_reconciliation_invoices))
                partial_reconciliations_done.append(line.reconcile_partial_id.id)
            self.residual += line_amount
        # The residual is never reported negative (overpayments clamp to zero).
        self.residual = max(self.residual, 0.0)
    @api.one
    @api.depends(
        'move_id.line_id.account_id',
        'move_id.line_id.reconcile_id.line_id',
        'move_id.line_id.reconcile_partial_id.line_partial_ids',
    )
    def _compute_move_lines(self):
        # Give Journal Items related to the payment reconciled to this invoice.
        # Return partial and total payments related to the selected invoice.
        self.move_lines = self.env['account.move.line']
        if not self.move_id:
            return
        # Move lines booked on the same account as the invoice.
        data_lines = self.move_id.line_id.filtered(lambda l: l.account_id == self.account_id)
        partial_lines = self.env['account.move.line']
        for data_line in data_lines:
            if data_line.reconcile_id:
                lines = data_line.reconcile_id.line_id
            elif data_line.reconcile_partial_id:
                lines = data_line.reconcile_partial_id.line_partial_ids
            else:
                lines = self.env['account.move.line']
            partial_lines += data_line
        # NOTE(review): `lines` is only bound inside the loop; if data_lines is
        # empty this raises NameError — confirm callers guarantee a match.
        self.move_lines = lines - partial_lines
    @api.one
    @api.depends(
        'move_id.line_id.reconcile_id.line_id',
        'move_id.line_id.reconcile_partial_id.line_partial_ids',
    )
    def _compute_payments(self):
        """Collect the payment move lines reconciled (fully or partially)
        against this invoice, excluding the invoice's own lines."""
        partial_lines = lines = self.env['account.move.line']
        for line in self.move_id.line_id:
            # Only lines on the invoice account take part in payments.
            if line.account_id != self.account_id:
                continue
            if line.reconcile_id:
                lines |= line.reconcile_id.line_id
            elif line.reconcile_partial_id:
                lines |= line.reconcile_partial_id.line_partial_ids
            partial_lines += line
        self.payment_ids = (lines - partial_lines).sorted()
name = fields.Char(string='Reference/Description', index=True,
readonly=True, states={'draft': [('readonly', False)]})
origin = fields.Char(string='Source Document',
help="Reference of the document that produced this invoice.",
readonly=True, states={'draft': [('readonly', False)]})
supplier_invoice_number = fields.Char(string='Supplier Invoice Number',
help="The reference of this invoice as provided by the supplier.",
readonly=True, states={'draft': [('readonly', False)]})
type = fields.Selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
], string='Type', readonly=True, index=True, change_default=True,
default=lambda self: self._context.get('type', 'out_invoice'),
track_visibility='always')
number = fields.Char(related='move_id.name', store=True, readonly=True, copy=False)
internal_number = fields.Char(string='Invoice Number', readonly=True,
default=False, copy=False,
help="Unique number of the invoice, computed automatically when the invoice is created.")
reference = fields.Char(string='Invoice Reference',
help="The partner reference of this invoice.")
reference_type = fields.Selection('_get_reference_type', string='Payment Reference',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default='none')
comment = fields.Text('Additional Information')
state = fields.Selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('cancel','Cancelled'),
], string='Status', index=True, readonly=True, default='draft',
track_visibility='onchange', copy=False,
help=" * The 'Draft' status is used when a user is encoding a new and unconfirmed Invoice.\n"
" * The 'Pro-forma' when invoice is in Pro-forma status,invoice does not have an invoice number.\n"
" * The 'Open' status is used when user create invoice,a invoice number is generated.Its in open status till user does not pay invoice.\n"
" * The 'Paid' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled.\n"
" * The 'Cancelled' status is used when user cancel invoice.")
sent = fields.Boolean(readonly=True, default=False, copy=False,
help="It indicates that the invoice has been sent.")
date_invoice = fields.Date(string='Invoice Date',
readonly=True, states={'draft': [('readonly', False)]}, index=True,
help="Keep empty to use the current date", copy=False)
date_due = fields.Date(string='Due Date',
readonly=True, states={'draft': [('readonly', False)]}, index=True, copy=False,
help="If you use payment terms, the due date will be computed automatically at the generation "
"of accounting entries. The payment term may compute several due dates, for example 50% "
"now and 50% in one month, but if you want to force a due date, make sure that the payment "
"term is not set on the invoice. If you keep the payment term and the due date empty, it "
"means direct payment.")
partner_id = fields.Many2one('res.partner', string='Partner', change_default=True,
required=True, readonly=True, states={'draft': [('readonly', False)]},
track_visibility='always')
payment_term = fields.Many2one('account.payment.term', string='Payment Terms',
readonly=True, states={'draft': [('readonly', False)]},
help="If you use payment terms, the due date will be computed automatically at the generation "
"of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "
"The payment term may compute several due dates, for example 50% now, 50% in one month.")
period_id = fields.Many2one('account.period', string='Force Period',
domain=[('state', '!=', 'done')], copy=False,
help="Keep empty to use the period of the validation(invoice) date.",
readonly=True, states={'draft': [('readonly', False)]})
account_id = fields.Many2one('account.account', string='Account',
required=True, readonly=True, states={'draft': [('readonly', False)]},
help="The partner account used for this invoice.")
invoice_line = fields.One2many('account.invoice.line', 'invoice_id', string='Invoice Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True)
tax_line = fields.One2many('account.invoice.tax', 'invoice_id', string='Tax Lines',
readonly=True, states={'draft': [('readonly', False)]}, copy=True)
move_id = fields.Many2one('account.move', string='Journal Entry',
readonly=True, index=True, ondelete='restrict', copy=False,
help="Link to the automatically generated Journal Items.")
amount_untaxed = fields.Float(string='Subtotal', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount', track_visibility='always')
amount_tax = fields.Float(string='Tax', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount')
amount_total = fields.Float(string='Total', digits=dp.get_precision('Account'),
store=True, readonly=True, compute='_compute_amount')
currency_id = fields.Many2one('res.currency', string='Currency',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=_default_currency, track_visibility='always')
journal_id = fields.Many2one('account.journal', string='Journal',
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=_default_journal,
domain="[('type', 'in', {'out_invoice': ['sale'], 'out_refund': ['sale_refund'], 'in_refund': ['purchase_refund'], 'in_invoice': ['purchase']}.get(type, [])), ('company_id', '=', company_id)]")
company_id = fields.Many2one('res.company', string='Company', change_default=True,
required=True, readonly=True, states={'draft': [('readonly', False)]},
default=lambda self: self.env['res.company']._company_default_get('account.invoice'))
check_total = fields.Float(string='Verification Total', digits=dp.get_precision('Account'),
readonly=True, states={'draft': [('readonly', False)]}, default=0.0)
reconciled = fields.Boolean(string='Paid/Reconciled',
store=True, readonly=True, compute='_compute_reconciled',
help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment.")
partner_bank_id = fields.Many2one('res.partner.bank', string='Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.',
readonly=True, states={'draft': [('readonly', False)]})
move_lines = fields.Many2many('account.move.line', string='Entry Lines',
compute='_compute_move_lines')
residual = fields.Float(string='Balance', digits=dp.get_precision('Account'),
compute='_compute_residual', store=True,
help="Remaining amount due.")
payment_ids = fields.Many2many('account.move.line', string='Payments',
compute='_compute_payments')
move_name = fields.Char(string='Journal Entry', readonly=True,
states={'draft': [('readonly', False)]}, copy=False)
user_id = fields.Many2one('res.users', string='Salesperson', track_visibility='onchange',
readonly=True, states={'draft': [('readonly', False)]},
default=lambda self: self.env.user)
fiscal_position = fields.Many2one('account.fiscal.position', string='Fiscal Position',
readonly=True, states={'draft': [('readonly', False)]})
commercial_partner_id = fields.Many2one('res.partner', string='Commercial Entity',
related='partner_id.commercial_partner_id', store=True, readonly=True,
help="The commercial entity that will be used on Journal Entries for this invoice")
_sql_constraints = [
('number_uniq', 'unique(number, company_id, journal_id, type)',
'Invoice Number must be unique per Company!'),
]
@api.model
def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False):
context = self._context
def get_view_id(xid, name):
try:
return self.env['ir.model.data'].xmlid_to_res_id('account.' + xid, raise_if_not_found=True)
except ValueError:
try:
return self.env['ir.ui.view'].search([('name', '=', name)], limit=1).id
except Exception:
return False # view not found
if context.get('active_model') == 'res.partner' and context.get('active_ids'):
partner = self.env['res.partner'].browse(context['active_ids'])[0]
if not view_type:
view_id = get_view_id('invoice_tree', 'account.invoice.tree')
view_type = 'tree'
elif view_type == 'form':
if partner.supplier and not partner.customer:
view_id = get_view_id('invoice_supplier_form', 'account.invoice.supplier.form')
elif partner.customer and not partner.supplier:
view_id = get_view_id('invoice_form', 'account.invoice.form')
res = super(account_invoice, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
# adapt selection of field journal_id
for field in res['fields']:
if field == 'journal_id' and type:
journal_select = self.env['account.journal']._name_search('', [('type', '=', type)], name_get_uid=1)
res['fields'][field]['selection'] = journal_select
doc = etree.XML(res['arch'])
if context.get('type'):
for node in doc.xpath("//field[@name='partner_bank_id']"):
if context['type'] == 'in_refund':
node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]")
elif context['type'] == 'out_refund':
node.set('domain', "[('partner_id', '=', partner_id)]")
if view_type == 'search':
if context.get('type') in ('out_invoice', 'out_refund'):
for node in doc.xpath("//group[@name='extended filter']"):
doc.remove(node)
if view_type == 'tree':
partner_string = _('Customer')
if context.get('type') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in doc.xpath("//field[@name='reference']"):
node.set('invisible', '0')
for node in doc.xpath("//field[@name='partner_id']"):
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
@api.multi
def invoice_print(self):
""" Print the invoice and mark it as sent, so that we can see more
easily the next step of the workflow
"""
assert len(self) == 1, 'This option should only be used for a single id at a time.'
self.sent = True
return self.env['report'].get_action(self, 'account.report_invoice')
@api.multi
def action_invoice_sent(self):
""" Open a window to compose an email, with the edi invoice template
message loaded by default
"""
assert len(self) == 1, 'This option should only be used for a single id at a time.'
template = self.env.ref('account.email_template_edi_invoice', False)
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
ctx = dict(
default_model='account.invoice',
default_res_id=self.id,
default_use_template=bool(template),
default_template_id=template.id,
default_composition_mode='comment',
mark_invoice_as_sent=True,
)
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': ctx,
}
@api.multi
def confirm_paid(self):
return self.write({'state': 'paid'})
@api.multi
def unlink(self):
for invoice in self:
if invoice.state not in ('draft', 'cancel'):
raise Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
elif invoice.internal_number:
raise Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
return super(account_invoice, self).unlink()
    @api.multi
    def onchange_partner_id(self, type, partner_id, date_invoice=False,
            payment_term=False, partner_bank_id=False, company_id=False):
        """Onchange for the partner: derive account, payment term, fiscal
        position and (for supplier documents) bank account.

        Returns the usual onchange dict ({'value': {...}}).
        """
        account_id = False
        payment_term_id = False
        fiscal_position = False
        bank_id = False
        if partner_id:
            p = self.env['res.partner'].browse(partner_id)
            rec_account = p.property_account_receivable
            pay_account = p.property_account_payable
            if company_id:
                # When the partner's accounts belong to another company, look
                # up company-specific ir.property values instead.
                if p.property_account_receivable.company_id and \
                        p.property_account_receivable.company_id.id != company_id and \
                        p.property_account_payable.company_id and \
                        p.property_account_payable.company_id.id != company_id:
                    prop = self.env['ir.property']
                    rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
                    pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
                    res_dom = [('res_id', '=', 'res.partner,%s' % partner_id)]
                    # Partner-specific property first, company default second.
                    rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
                    pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
                    rec_account = rec_prop.get_by_record(rec_prop)
                    pay_account = pay_prop.get_by_record(pay_prop)
                    if not rec_account and not pay_account:
                        action = self.env.ref('account.action_account_config')
                        msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
                        raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
            if type in ('out_invoice', 'out_refund'):
                account_id = rec_account.id
                payment_term_id = p.property_payment_term.id
            else:
                account_id = pay_account.id
                payment_term_id = p.property_supplier_payment_term.id
            fiscal_position = p.property_account_position.id
            bank_id = p.bank_ids and p.bank_ids[0].id or False
        result = {'value': {
            'account_id': account_id,
            'payment_term': payment_term_id,
            'fiscal_position': fiscal_position,
        }}
        # Bank account is only relevant on supplier documents.
        if type in ('in_invoice', 'in_refund'):
            result['value']['partner_bank_id'] = bank_id
        if payment_term != payment_term_id:
            if payment_term_id:
                to_update = self.onchange_payment_term_date_invoice(payment_term_id, date_invoice)
                result['value'].update(to_update.get('value', {}))
            else:
                result['value']['date_due'] = False
        if partner_bank_id != bank_id:
            to_update = self.onchange_partner_bank(bank_id)
            result['value'].update(to_update.get('value', {}))
        return result
@api.multi
def onchange_journal_id(self, journal_id=False):
if journal_id:
journal = self.env['account.journal'].browse(journal_id)
return {
'value': {
'currency_id': journal.currency.id or journal.company_id.currency_id.id,
'company_id': journal.company_id.id,
}
}
return {}
    @api.multi
    def onchange_payment_term_date_invoice(self, payment_term_id, date_invoice):
        """Recompute the due date from the payment term and invoice date.

        Returns an onchange dict; raises when the payment term has no lines.
        """
        if not date_invoice:
            date_invoice = fields.Date.context_today(self)
        if not payment_term_id:
            # To make sure the invoice due date should contain due date which is
            # entered by user when there is no payment term defined
            return {'value': {'date_due': self.date_due or date_invoice}}
        pterm = self.env['account.payment.term'].browse(payment_term_id)
        pterm_list = pterm.compute(value=1, date_ref=date_invoice)[0]
        if pterm_list:
            # The due date is the latest installment date of the term.
            return {'value': {'date_due': max(line[0] for line in pterm_list)}}
        else:
            raise except_orm(_('Insufficient Data!'),
                _('The payment term of supplier does not have a payment term line.'))
    @api.multi
    def onchange_invoice_line(self, lines):
        # Extension hook: no onchange values for invoice lines by default.
        return {}
    @api.multi
    def onchange_partner_bank(self, partner_bank_id=False):
        # Extension hook: no onchange values for the partner bank by default.
        return {'value': {}}
    @api.multi
    def onchange_company_id(self, company_id, part_id, type, invoice_line, currency_id):
        """Onchange for the company: re-resolve the partner account, remap
        line accounts to the new company and pick a matching journal.

        Returns {'value': ..., 'domain': ...} for the client.
        """
        # TODO: add the missing context parameter when forward-porting in trunk
        # so we can remove this hack!
        self = self.with_context(self.env['res.users'].context_get())
        values = {}
        domain = {}
        if company_id and part_id and type:
            p = self.env['res.partner'].browse(part_id)
            if p.property_account_payable and p.property_account_receivable and \
                    p.property_account_payable.company_id.id != company_id and \
                    p.property_account_receivable.company_id.id != company_id:
                prop = self.env['ir.property']
                rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
                pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
                res_dom = [('res_id', '=', 'res.partner,%s' % part_id)]
                # Partner-specific property first, company default second.
                rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
                pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
                rec_account = rec_prop.get_by_record(rec_prop)
                pay_account = pay_prop.get_by_record(pay_prop)
                if not rec_account and not pay_account:
                    action = self.env.ref('account.action_account_config')
                    msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
                    raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
                if type in ('out_invoice', 'out_refund'):
                    acc_id = rec_account.id
                else:
                    acc_id = pay_account.id
                values= {'account_id': acc_id}
            if self:
                if company_id:
                    # Remap stored line accounts onto same-named accounts of
                    # the new company.
                    for line in self.invoice_line:
                        if not line.account_id:
                            continue
                        if line.account_id.company_id.id == company_id:
                            continue
                        accounts = self.env['account.account'].search([('name', '=', line.account_id.name), ('company_id', '=', company_id)])
                        if not accounts:
                            action = self.env.ref('account.action_account_config')
                            msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
                            raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
                        line.write({'account_id': accounts[-1].id})
            else:
                # Unsaved record: invoice_line holds x2many commands.
                for line_cmd in invoice_line or []:
                    if len(line_cmd) >= 3 and isinstance(line_cmd[2], dict):
                        line = self.env['account.account'].browse(line_cmd[2]['account_id'])
                        if line.company_id.id != company_id:
                            raise except_orm(
                                _('Configuration Error!'),
                                _("Invoice line account's company and invoice's company does not match.")
                            )
        if company_id and type:
            journal_type = TYPE2JOURNAL[type]
            journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
            if journals:
                values['journal_id'] = journals[0].id
            # User/company defaults override the first matching journal.
            journal_defaults = self.env['ir.values'].get_defaults_dict('account.invoice', 'type=%s' % type)
            if 'journal_id' in journal_defaults:
                values['journal_id'] = journal_defaults['journal_id']
            if not values.get('journal_id'):
                field_desc = journals.fields_get(['type'])
                type_label = next(t for t, label in field_desc['type']['selection'] if t == journal_type)
                action = self.env.ref('account.action_account_journal_form')
                msg = _('Cannot find any account journal of type "%s" for this company, You should create one.\n Please go to Journal Configuration') % type_label
                raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
            domain = {'journal_id': [('id', 'in', journals.ids)]}
        return {'value': values, 'domain': domain}
@api.multi
def action_cancel_draft(self):
# go from canceled state to draft state
self.write({'state': 'draft'})
self.delete_workflow()
self.create_workflow()
return True
@api.one
@api.returns('ir.ui.view')
def get_formview_id(self):
""" Update form view id of action to open the invoice """
if self.type == 'in_invoice':
return self.env.ref('account.invoice_supplier_form')
else:
return self.env.ref('account.invoice_form')
    @api.multi
    def move_line_id_payment_get(self):
        """Return ids of the invoice move's lines on the invoice account."""
        # return the move line ids with the same account as the invoice self
        if not self.id:
            return []
        # Parameterized query: self.id is passed through psycopg2, not formatted in.
        query = """ SELECT l.id
            FROM account_move_line l, account_invoice i
            WHERE i.id = %s AND l.move_id = i.move_id AND l.account_id = i.account_id
        """
        self._cr.execute(query, (self.id,))
        return [row[0] for row in self._cr.fetchall()]
    @api.multi
    def test_paid(self):
        """Return True when every payment move line is reconciled."""
        # check whether all corresponding account move lines are reconciled
        line_ids = self.move_line_id_payment_get()
        if not line_ids:
            return False
        query = "SELECT reconcile_id FROM account_move_line WHERE id IN %s"
        self._cr.execute(query, (tuple(line_ids),))
        # A NULL reconcile_id means the line is still open.
        return all(row[0] for row in self._cr.fetchall())
    @api.multi
    def button_reset_taxes(self):
        """Drop automatic tax lines and recompute them from the invoice lines.

        Manually entered tax lines are preserved.
        """
        account_invoice_tax = self.env['account.invoice.tax']
        ctx = dict(self._context)
        for invoice in self:
            # Raw DELETE bypasses the ORM, so the cache must be invalidated.
            self._cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False", (invoice.id,))
            self.invalidate_cache()
            partner = invoice.partner_id
            if partner.lang:
                # Compute tax descriptions in the partner's language.
                ctx['lang'] = partner.lang
            for taxe in account_invoice_tax.compute(invoice.with_context(ctx)).values():
                account_invoice_tax.create(taxe)
        # dummy write on self to trigger recomputations
        return self.with_context(ctx).write({'invoice_line': []})
@api.multi
def button_compute(self, set_total=False):
self.button_reset_taxes()
for invoice in self:
if set_total:
invoice.check_total = invoice.amount_total
return True
    @api.multi
    def _get_analytic_lines(self):
        """ Return a list of dict for creating analytic lines for self[0] """
        company_currency = self.company_id.currency_id
        # Customer invoices / supplier refunds are positive, the others negative.
        sign = 1 if self.type in ('out_invoice', 'in_refund') else -1
        iml = self.env['account.invoice.line'].move_line_get(self.id)
        for il in iml:
            if il['account_analytic_id']:
                if self.type in ('in_invoice', 'in_refund'):
                    ref = self.reference
                else:
                    ref = self.number
                if not self.journal_id.analytic_journal_id:
                    raise except_orm(_('No Analytic Journal!'),
                        _("You have to define an analytic journal on the '%s' journal!") % (self.journal_id.name,))
                currency = self.currency_id.with_context(date=self.date_invoice)
                # (0, 0, vals) command: create the analytic line with the move line.
                il['analytic_lines'] = [(0,0, {
                    'name': il['name'],
                    'date': self.date_invoice,
                    'account_id': il['account_analytic_id'],
                    'unit_amount': il['quantity'],
                    'amount': currency.compute(il['price'], company_currency) * sign,
                    'product_id': il['product_id'],
                    'product_uom_id': il['uos_id'],
                    'general_account_id': il['account_id'],
                    'journal_id': self.journal_id.analytic_journal_id.id,
                    'ref': ref,
                })]
        return iml
@api.multi
def action_date_assign(self):
for inv in self:
res = inv.onchange_payment_term_date_invoice(inv.payment_term.id, inv.date_invoice)
if res and res.get('value'):
inv.write(res['value'])
return True
    @api.multi
    def finalize_invoice_move_lines(self, move_lines):
        """ finalize_invoice_move_lines(move_lines) -> move_lines

        Hook method to be overridden in additional modules to verify and
        possibly alter the move lines to be created by an invoice, for
        special cases.
        :param move_lines: list of dictionaries with the account.move.lines (as for create())
        :return: the (possibly updated) final move_lines to create for this invoice
        """
        # Default implementation: pass the lines through unchanged.
        return move_lines
    @api.multi
    def check_tax_lines(self, compute_taxes):
        """Verify that the stored tax lines agree with *compute_taxes*, the
        freshly computed taxes keyed by (tax_code, base_code, account).

        Creates the tax lines when none exist yet; otherwise raises when a
        non-manual tax line has no computed counterpart, when a base amount
        is stale, or when a computed tax has no stored line.
        """
        account_invoice_tax = self.env['account.invoice.tax']
        company_currency = self.company_id.currency_id
        if not self.tax_line:
            # No tax lines yet: simply create them all.
            for tax in compute_taxes.values():
                account_invoice_tax.create(tax)
        else:
            tax_key = []
            precision = self.env['decimal.precision'].precision_get('Account')
            for tax in self.tax_line:
                if tax.manual:
                    # Manually entered tax lines are never checked.
                    continue
                key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id)
                tax_key.append(key)
                if key not in compute_taxes:
                    raise except_orm(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
                base = compute_taxes[key]['base']
                # A base difference beyond the currency rounding means the
                # stored taxes are stale and must be recomputed.
                if float_compare(abs(base - tax.base), company_currency.rounding, precision_digits=precision) == 1:
                    raise except_orm(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
            for key in compute_taxes:
                if key not in tax_key:
                    raise except_orm(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))
    @api.multi
    def compute_invoice_totals(self, company_currency, ref, invoice_move_lines):
        """Convert move-line amounts to company currency and sign them
        according to the invoice type.

        Each line dict gets 'currency_id', 'amount_currency' and 'ref' set;
        'price' is converted to company currency and negated for customer
        invoices / supplier refunds (credits on the invoice account).

        :return: (total, total_currency, invoice_move_lines) — totals in
                 company currency and invoice currency respectively.
        """
        total = 0
        total_currency = 0
        for line in invoice_move_lines:
            if self.currency_id != company_currency:
                # Multi-currency: keep the original amount in
                # 'amount_currency' and convert 'price' at the invoice date.
                currency = self.currency_id.with_context(date=self.date_invoice or fields.Date.context_today(self))
                line['currency_id'] = currency.id
                line['amount_currency'] = line['price']
                line['price'] = currency.compute(line['price'], company_currency)
            else:
                line['currency_id'] = False
                line['amount_currency'] = False
            line['ref'] = ref
            if self.type in ('out_invoice','in_refund'):
                total += line['price']
                total_currency += line['amount_currency'] or line['price']
                line['price'] = - line['price']
            else:
                total -= line['price']
                total_currency -= line['amount_currency'] or line['price']
        return total, total_currency, invoice_move_lines
def inv_line_characteristic_hashcode(self, invoice_line):
"""Overridable hashcode generation for invoice lines. Lines having the same hashcode
will be grouped together if the journal has the 'group line' option. Of course a module
can add fields to invoice lines that would need to be tested too before merging lines
or not."""
return "%s-%s-%s-%s-%s" % (
invoice_line['account_id'],
invoice_line.get('tax_code_id', 'False'),
invoice_line.get('product_id', 'False'),
invoice_line.get('analytic_account_id', 'False'),
invoice_line.get('date_maturity', 'False'),
)
def group_lines(self, iml, line):
"""Merge account move lines (and hence analytic lines) if invoice line hashcodes are equals"""
if self.journal_id.group_invoice_lines:
line2 = {}
for x, y, l in line:
tmp = self.inv_line_characteristic_hashcode(l)
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
return line
    @api.multi
    def action_move_create(self):
        """ Creates invoice related analytics and financial move lines """
        account_invoice_tax = self.env['account.invoice.tax']
        account_move = self.env['account.move']
        for inv in self:
            if not inv.journal_id.sequence_id:
                raise except_orm(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
            if not inv.invoice_line:
                raise except_orm(_('No Invoice Lines!'), _('Please create some invoice lines.'))
            if inv.move_id:
                # A journal entry already exists: nothing to do here.
                continue
            ctx = dict(self._context, lang=inv.partner_id.lang)
            if not inv.date_invoice:
                # Default the invoice date to today before using it below.
                inv.with_context(ctx).write({'date_invoice': fields.Date.context_today(self)})
            date_invoice = inv.date_invoice
            company_currency = inv.company_id.currency_id
            # create the analytical lines, one move line per invoice line
            iml = inv._get_analytic_lines()
            # check if taxes are all computed
            compute_taxes = account_invoice_tax.compute(inv.with_context(lang=inv.partner_id.lang))
            inv.check_tax_lines(compute_taxes)
            # I disabled the check_total feature
            if self.env['res.users'].has_group('account.group_supplier_inv_check_total'):
                if inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0):
                    raise except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))
            if inv.payment_term:
                # Sanity check: fixed+percent parts of the payment term must
                # not exceed 100% of the total, or maturities over-allocate.
                total_fixed = total_percent = 0
                for line in inv.payment_term.line_ids:
                    if line.value == 'fixed':
                        total_fixed += line.value_amount
                    if line.value == 'procent':
                        total_percent += line.value_amount
                total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
                if (total_fixed + total_percent) > 100:
                    raise except_orm(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))
            # one move line per tax line
            iml += account_invoice_tax.move_line_get(inv.id)
            if inv.type in ('in_invoice', 'in_refund'):
                ref = inv.reference
            else:
                ref = inv.number
            diff_currency = inv.currency_id != company_currency
            # create one move line for the total and possibly adjust the other lines amount
            total, total_currency, iml = inv.with_context(ctx).compute_invoice_totals(company_currency, ref, iml)
            name = inv.supplier_invoice_number or inv.name or '/'
            totlines = []
            if inv.payment_term:
                totlines = inv.with_context(ctx).payment_term.compute(total, date_invoice)[0]
            if totlines:
                # One counterpart line per payment-term maturity date.
                res_amount_currency = total_currency
                ctx['date'] = date_invoice
                for i, t in enumerate(totlines):
                    if inv.currency_id != company_currency:
                        amount_currency = company_currency.with_context(ctx).compute(t[1], inv.currency_id)
                    else:
                        amount_currency = False
                    # last line: add the diff
                    res_amount_currency -= amount_currency or 0
                    if i + 1 == len(totlines):
                        amount_currency += res_amount_currency
                    iml.append({
                        'type': 'dest',
                        'name': name,
                        'price': t[1],
                        'account_id': inv.account_id.id,
                        'date_maturity': t[0],
                        'amount_currency': diff_currency and amount_currency,
                        'currency_id': diff_currency and inv.currency_id.id,
                        'ref': ref,
                    })
            else:
                # No payment term: one counterpart line, due at date_due.
                iml.append({
                    'type': 'dest',
                    'name': name,
                    'price': total,
                    'account_id': inv.account_id.id,
                    'date_maturity': inv.date_due,
                    'amount_currency': diff_currency and total_currency,
                    'currency_id': diff_currency and inv.currency_id.id,
                    'ref': ref
                })
            date = date_invoice
            part = self.env['res.partner']._find_accounting_partner(inv.partner_id)
            line = [(0, 0, self.line_get_convert(l, part.id, date)) for l in iml]
            line = inv.group_lines(iml, line)
            journal = inv.journal_id.with_context(ctx)
            if journal.centralisation:
                raise except_orm(_('User Error!'),
                        _('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))
            line = inv.finalize_invoice_move_lines(line)
            move_vals = {
                'ref': inv.reference or inv.name,
                'line_id': line,
                'journal_id': journal.id,
                'date': inv.date_invoice,
                'narration': inv.comment,
                'company_id': inv.company_id.id,
            }
            ctx['company_id'] = inv.company_id.id
            period = inv.period_id
            if not period:
                # Find the accounting period matching the invoice date.
                period = period.with_context(ctx).find(date_invoice)[:1]
            if period:
                move_vals['period_id'] = period.id
                for i in line:
                    i[2]['period_id'] = period.id
            ctx['invoice'] = inv
            move = account_move.with_context(ctx).create(move_vals)
            # make the invoice point to that move
            vals = {
                'move_id': move.id,
                'period_id': period.id,
                'move_name': move.name,
            }
            inv.with_context(ctx).write(vals)
            # Pass invoice in context in method post: used if you want to get the same
            # account move reference when creating the same invoice after a cancelled one:
            move.post()
        self._log_event()
        return True
@api.multi
def invoice_validate(self):
return self.write({'state': 'open'})
@api.model
def line_get_convert(self, line, part, date):
return {
'date_maturity': line.get('date_maturity', False),
'partner_id': part,
'name': line['name'][:64],
'date': date,
'debit': line['price']>0 and line['price'],
'credit': line['price']<0 and -line['price'],
'account_id': line['account_id'],
'analytic_lines': line.get('analytic_lines', []),
'amount_currency': line['price']>0 and abs(line.get('amount_currency', False)) or -abs(line.get('amount_currency', False)),
'currency_id': line.get('currency_id', False),
'tax_code_id': line.get('tax_code_id', False),
'tax_amount': line.get('tax_amount', False),
'ref': line.get('ref', False),
'quantity': line.get('quantity',1.00),
'product_id': line.get('product_id', False),
'product_uom_id': line.get('uos_id', False),
'analytic_account_id': line.get('account_analytic_id', False),
}
@api.multi
def action_number(self):
#TODO: not correct fix but required a fresh values before reading it.
self.write({})
for inv in self:
self.write({'internal_number': inv.number})
if inv.type in ('in_invoice', 'in_refund'):
if not inv.reference:
ref = inv.number
else:
ref = inv.reference
else:
ref = inv.number
self._cr.execute(""" UPDATE account_move SET ref=%s
WHERE id=%s AND (ref IS NULL OR ref = '')""",
(ref, inv.move_id.id))
self._cr.execute(""" UPDATE account_move_line SET ref=%s
WHERE move_id=%s AND (ref IS NULL OR ref = '')""",
(ref, inv.move_id.id))
self._cr.execute(""" UPDATE account_analytic_line SET ref=%s
FROM account_move_line
WHERE account_move_line.move_id = %s AND
account_analytic_line.move_id = account_move_line.id""",
(ref, inv.move_id.id))
self.invalidate_cache()
return True
    @api.multi
    def action_cancel(self):
        """Cancel the invoices: refuse when a partial payment is still
        reconciled, otherwise detach and delete the related journal entries
        and set the state to 'cancel'.
        """
        moves = self.env['account.move']
        for inv in self:
            if inv.move_id:
                moves += inv.move_id
            if inv.payment_ids:
                for move_line in inv.payment_ids:
                    if move_line.reconcile_partial_id.line_partial_ids:
                        raise except_orm(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))
        # First, set the invoices as cancelled and detach the move ids
        self.write({'state': 'cancel', 'move_id': False})
        if moves:
            # second, invalidate the move(s)
            moves.button_cancel()
            # delete the move this invoice was pointing to
            # Note that the corresponding move_lines and move_reconciles
            # will be automatically deleted too
            moves.unlink()
        self._log_event(-1.0, 'Cancel Invoice')
        return True
###################
@api.multi
def _log_event(self, factor=1.0, name='Open Invoice'):
#TODO: implement messages system
return True
@api.multi
def name_get(self):
TYPES = {
'out_invoice': _('Invoice'),
'in_invoice': _('Supplier Invoice'),
'out_refund': _('Refund'),
'in_refund': _('Supplier Refund'),
}
result = []
for inv in self:
result.append((inv.id, "%s %s" % (inv.number or TYPES[inv.type], inv.name or '')))
return result
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
recs = self.browse()
if name:
recs = self.search([('number', '=', name)] + args, limit=limit)
if not recs:
recs = self.search([('name', operator, name)] + args, limit=limit)
return recs.name_get()
    @api.model
    def _refund_cleanup_lines(self, lines):
        """ Convert records to dict of values suitable for one2many line creation

        :param recordset lines: records to convert
        :return: list of command tuples for one2many line creation [(0, 0, dict of values), ...]
        """
        result = []
        for line in lines:
            values = {}
            for name, field in line._fields.iteritems():
                if name in MAGIC_COLUMNS:
                    # Skip technical columns (id, create_uid, ...).
                    continue
                elif field.type == 'many2one':
                    values[name] = line[name].id
                elif field.type not in ['many2many', 'one2many']:
                    # Scalar fields are copied verbatim.
                    values[name] = line[name]
                elif name == 'invoice_line_tax_id':
                    # Taxes are the only x2many carried over, via a
                    # 'replace' (6, 0, ids) command.
                    values[name] = [(6, 0, line[name].ids)]
            result.append((0, 0, values))
        return result
    @api.model
    def _prepare_refund(self, invoice, date=None, period_id=None, description=None, journal_id=None):
        """ Prepare the dict of values to create the new refund from the invoice.
            This method may be overridden to implement custom
            refund generation (making sure to call super() to establish
            a clean extension chain).

            :param record invoice: invoice to refund
            :param string date: refund creation date from the wizard
            :param integer period_id: force account.period from the wizard
            :param string description: description of the refund from the wizard
            :param integer journal_id: account.journal from the wizard
            :return: dict of value to create() the refund
        """
        values = {}
        # Copy the header fields, keeping only ids for many2one fields.
        for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id',
                'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']:
            if invoice._fields[field].type == 'many2one':
                values[field] = invoice[field].id
            else:
                values[field] = invoice[field] or False
        values['invoice_line'] = self._refund_cleanup_lines(invoice.invoice_line)
        # Only manual tax lines are carried over; the rest are recomputed.
        tax_lines = filter(lambda l: l.manual, invoice.tax_line)
        values['tax_line'] = self._refund_cleanup_lines(tax_lines)
        # Refund journal: the one given by the wizard, otherwise the first
        # purchase/sale refund journal matching the invoice type.
        if journal_id:
            journal = self.env['account.journal'].browse(journal_id)
        elif invoice['type'] == 'in_invoice':
            journal = self.env['account.journal'].search([('type', '=', 'purchase_refund')], limit=1)
        else:
            journal = self.env['account.journal'].search([('type', '=', 'sale_refund')], limit=1)
        values['journal_id'] = journal.id
        values['type'] = TYPE2REFUND[invoice['type']]
        values['date_invoice'] = date or fields.Date.context_today(invoice)
        values['state'] = 'draft'
        values['number'] = False
        values['origin'] = invoice.number
        if period_id:
            values['period_id'] = period_id
        if description:
            values['name'] = description
        return values
@api.multi
@api.returns('self')
def refund(self, date=None, period_id=None, description=None, journal_id=None):
new_invoices = self.browse()
for invoice in self:
# create the new invoice
values = self._prepare_refund(invoice, date=date, period_id=period_id,
description=description, journal_id=journal_id)
new_invoices += self.create(values)
return new_invoices
    @api.v8
    def pay_and_reconcile(self, pay_amount, pay_account_id, period_id, pay_journal_id,
                          writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=''):
        """Register a payment of *pay_amount* on the (single) invoice,
        create the payment move and reconcile it with the invoice's move
        lines, fully (with an optional write-off account) or partially.
        """
        # TODO check if we can use different period for payment and the writeoff line
        assert len(self)==1, "Can only pay one invoice at a time."
        # Take the seq as name for move
        SIGN = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
        direction = SIGN[self.type]
        # take the chosen date
        date = self._context.get('date_p') or fields.Date.context_today(self)
        # Take the amount in currency and the currency of the payment
        if self._context.get('amount_currency') and self._context.get('currency_id'):
            amount_currency = self._context['amount_currency']
            currency_id = self._context['currency_id']
        else:
            amount_currency = False
            currency_id = False
        pay_journal = self.env['account.journal'].browse(pay_journal_id)
        if self.type in ('in_invoice', 'in_refund'):
            ref = self.reference
        else:
            ref = self.number
        partner = self.partner_id._find_accounting_partner(self.partner_id)
        name = name or self.invoice_line[0].name or self.number
        # Pay attention to the sign for both debit/credit AND amount_currency
        # l1: counterpart on the invoice (receivable/payable) account.
        l1 = {
            'name': name,
            'debit': direction * pay_amount > 0 and direction * pay_amount,
            'credit': direction * pay_amount < 0 and -direction * pay_amount,
            'account_id': self.account_id.id,
            'partner_id': partner.id,
            'ref': ref,
            'date': date,
            'currency_id': currency_id,
            'amount_currency': direction * (amount_currency or 0.0),
            'company_id': self.company_id.id,
        }
        # l2: the payment line on the chosen payment account.
        l2 = {
            'name': name,
            'debit': direction * pay_amount < 0 and -direction * pay_amount,
            'credit': direction * pay_amount > 0 and direction * pay_amount,
            'account_id': pay_account_id,
            'partner_id': partner.id,
            'ref': ref,
            'date': date,
            'currency_id': currency_id,
            'amount_currency': -direction * (amount_currency or 0.0),
            'company_id': self.company_id.id,
        }
        move = self.env['account.move'].create({
            'ref': ref,
            'line_id': [(0, 0, l1), (0, 0, l2)],
            'journal_id': pay_journal_id,
            'period_id': period_id,
            'date': date,
        })
        move_ids = (move | self.move_id).ids
        self._cr.execute("SELECT id FROM account_move_line WHERE move_id IN %s",
                         (tuple(move_ids),))
        lines = self.env['account.move.line'].browse([r[0] for r in self._cr.fetchall()])
        lines2rec = lines.browse()
        total = 0.0
        # Collect the lines booked on the invoice account (payment side and
        # invoice side) and their residual balance.
        for line in itertools.chain(lines, self.payment_ids):
            if line.account_id == self.account_id:
                lines2rec += line
                total += (line.debit or 0.0) - (line.credit or 0.0)
        inv_id, name = self.name_get()[0]
        if not round(total, self.env['decimal.precision'].precision_get('Account')) or writeoff_acc_id:
            # Fully paid (or write-off requested): full reconciliation.
            lines2rec.reconcile('manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id)
        else:
            code = self.currency_id.symbol
            # TODO: use currency's formatting function
            msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
                    (pay_amount, code, self.amount_total, code, total, code)
            self.message_post(body=msg)
            lines2rec.reconcile_partial('manual')
        # Update the stored value (fields.function), so we write to trigger recompute
        return self.write({})
    @api.v7
    def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id,
                          writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''):
        # Old-API shim: delegate to the new-API implementation above.
        recs = self.browse(cr, uid, ids, context)
        return recs.pay_and_reconcile(pay_amount, pay_account_id, period_id, pay_journal_id,
                                      writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=name)
class account_invoice_line(models.Model):
    """An invoice line: one product/account entry of an account.invoice."""
    _name = "account.invoice.line"
    _description = "Invoice Line"
    _order = "invoice_id,sequence,id"
    @api.one
    @api.depends('price_unit', 'discount', 'invoice_line_tax_id', 'quantity',
        'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id')
    def _compute_price(self):
        # Subtotal = discounted unit price * quantity, tax-excluded, rounded
        # in the invoice currency when the line is linked to an invoice.
        price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
        taxes = self.invoice_line_tax_id.compute_all(price, self.quantity, product=self.product_id, partner=self.invoice_id.partner_id)
        self.price_subtotal = taxes['total']
        if self.invoice_id:
            self.price_subtotal = self.invoice_id.currency_id.round(self.price_subtotal)
    @api.model
    def _default_price_unit(self):
        # Default the unit price so the invoice total matches the encoded
        # 'check_total': start from it and subtract every other line's
        # discounted amount plus its taxes.
        if not self._context.get('check_total'):
            return 0
        total = self._context['check_total']
        for l in self._context.get('invoice_line', []):
            if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]:
                vals = l[2]
                price = vals.get('price_unit', 0) * (1 - vals.get('discount', 0) / 100.0)
                total = total - (price * vals.get('quantity'))
                taxes = vals.get('invoice_line_tax_id')
                if taxes and len(taxes[0]) >= 3 and taxes[0][2]:
                    taxes = self.env['account.tax'].browse(taxes[0][2])
                    tax_res = taxes.compute_all(price, vals.get('quantity'),
                        product=vals.get('product_id'), partner=self._context.get('partner_id'))
                    for tax in tax_res['taxes']:
                        total = total - tax['amount']
        return total
    @api.model
    def _default_account(self):
        # XXX this gets the default account for the user's company,
        # it should get the default account for the invoice's company
        # however, the invoice's company does not reach this point
        if self._context.get('type') in ('out_invoice', 'out_refund'):
            return self.env['ir.property'].get('property_account_income_categ', 'product.category')
        else:
            return self.env['ir.property'].get('property_account_expense_categ', 'product.category')
    name = fields.Text(string='Description', required=True)
    origin = fields.Char(string='Source Document',
        help="Reference of the document that produced this invoice.")
    sequence = fields.Integer(string='Sequence', default=10,
        help="Gives the sequence of this line when displaying the invoice.")
    invoice_id = fields.Many2one('account.invoice', string='Invoice Reference',
        ondelete='cascade', index=True)
    uos_id = fields.Many2one('product.uom', string='Unit of Measure',
        ondelete='set null', index=True)
    product_id = fields.Many2one('product.product', string='Product',
        ondelete='restrict', index=True)
    account_id = fields.Many2one('account.account', string='Account',
        required=True, domain=[('type', 'not in', ['view', 'closed'])],
        default=_default_account,
        help="The income or expense account related to the selected product.")
    price_unit = fields.Float(string='Unit Price', required=True,
        digits= dp.get_precision('Product Price'),
        default=_default_price_unit)
    price_subtotal = fields.Float(string='Amount', digits= dp.get_precision('Account'),
        store=True, readonly=True, compute='_compute_price')
    quantity = fields.Float(string='Quantity', digits= dp.get_precision('Product Unit of Measure'),
        required=True, default=1)
    discount = fields.Float(string='Discount (%)', digits= dp.get_precision('Discount'),
        default=0.0)
    invoice_line_tax_id = fields.Many2many('account.tax',
        'account_invoice_line_tax', 'invoice_line_id', 'tax_id',
        string='Taxes', domain=[('parent_id', '=', False)])
    account_analytic_id = fields.Many2one('account.analytic.account',
        string='Analytic Account')
    company_id = fields.Many2one('res.company', string='Company',
        related='invoice_id.company_id', store=True, readonly=True)
    partner_id = fields.Many2one('res.partner', string='Partner',
        related='invoice_id.partner_id', store=True, readonly=True)
    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        # Restrict the selectable products to purchasable or saleable ones,
        # depending on the invoice type carried in the context.
        res = super(account_invoice_line, self).fields_view_get(
            view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        if self._context.get('type'):
            doc = etree.XML(res['arch'])
            for node in doc.xpath("//field[@name='product_id']"):
                if self._context['type'] in ('in_invoice', 'in_refund'):
                    node.set('domain', "[('purchase_ok', '=', True)]")
                else:
                    node.set('domain', "[('sale_ok', '=', True)]")
            res['arch'] = etree.tostring(doc)
        return res
    @api.multi
    def product_id_change(self, product, uom_id, qty=0, name='', type='out_invoice',
            partner_id=False, fposition_id=False, price_unit=False, currency_id=False,
            company_id=None):
        """Onchange for the product: fill description, account, taxes, price
        and unit of measure from the product and the fiscal position."""
        context = self._context
        company_id = company_id if company_id is not None else context.get('company_id', False)
        self = self.with_context(company_id=company_id, force_company=company_id)
        if not partner_id:
            raise except_orm(_('No Partner Defined!'), _("You must first select a partner!"))
        if not product:
            if type in ('in_invoice', 'in_refund'):
                return {'value': {}, 'domain': {'product_uom': []}}
            else:
                return {'value': {'price_unit': 0.0}, 'domain': {'product_uom': []}}
        values = {}
        part = self.env['res.partner'].browse(partner_id)
        fpos = self.env['account.fiscal.position'].browse(fposition_id)
        if part.lang:
            self = self.with_context(lang=part.lang)
        product = self.env['product.product'].browse(product)
        values['name'] = product.partner_ref
        # Income account for customer documents, expense account otherwise,
        # possibly remapped by the fiscal position.
        if type in ('out_invoice', 'out_refund'):
            account = product.property_account_income or product.categ_id.property_account_income_categ
        else:
            account = product.property_account_expense or product.categ_id.property_account_expense_categ
        account = fpos.map_account(account)
        if account:
            values['account_id'] = account.id
        if type in ('out_invoice', 'out_refund'):
            taxes = product.taxes_id or account.tax_ids
            if product.description_sale:
                values['name'] += '\n' + product.description_sale
        else:
            taxes = product.supplier_taxes_id or account.tax_ids
            if product.description_purchase:
                values['name'] += '\n' + product.description_purchase
        taxes = fpos.map_tax(taxes)
        values['invoice_line_tax_id'] = taxes.ids
        if type in ('in_invoice', 'in_refund'):
            values['price_unit'] = price_unit or product.standard_price
        else:
            values['price_unit'] = product.lst_price
        values['uos_id'] = uom_id or product.uom_id.id
        domain = {'uos_id': [('category_id', '=', product.uom_id.category_id.id)]}
        company = self.env['res.company'].browse(company_id)
        currency = self.env['res.currency'].browse(currency_id)
        if company and currency:
            if company.currency_id != currency:
                if type in ('in_invoice', 'in_refund'):
                    values['price_unit'] = product.standard_price
                values['price_unit'] = values['price_unit'] * currency.rate
            if values['uos_id'] and values['uos_id'] != product.uom_id.id:
                # Convert the price to the chosen unit of measure.
                values['price_unit'] = self.env['product.uom']._compute_price(
                    product.uom_id.id, values['price_unit'], values['uos_id'])
        return {'value': values, 'domain': domain}
    @api.multi
    def uos_id_change(self, product, uom, qty=0, name='', type='out_invoice', partner_id=False,
            fposition_id=False, price_unit=False, currency_id=False, company_id=None):
        """Onchange for the unit of measure: re-run the product onchange and
        warn (resetting the UoM) when the chosen UoM is incompatible."""
        context = self._context
        company_id = company_id if company_id != None else context.get('company_id', False)
        self = self.with_context(company_id=company_id)
        result = self.product_id_change(
            product, uom, qty, name, type, partner_id, fposition_id, price_unit,
            currency_id, company_id=company_id,
        )
        warning = {}
        if not uom:
            result['value']['price_unit'] = 0.0
        if product and uom:
            prod = self.env['product.product'].browse(product)
            prod_uom = self.env['product.uom'].browse(uom)
            if prod.uom_id.category_id != prod_uom.category_id:
                warning = {
                    'title': _('Warning!'),
                    'message': _('The selected unit of measure is not compatible with the unit of measure of the product.'),
                }
                result['value']['uos_id'] = prod.uom_id.id
        if warning:
            result['warning'] = warning
        return result
    @api.model
    def move_line_get(self, invoice_id):
        """Build the list of move-line dicts for the invoice's lines,
        appending one extra zero-amount entry per additional base tax code."""
        inv = self.env['account.invoice'].browse(invoice_id)
        currency = inv.currency_id.with_context(date=inv.date_invoice)
        company_currency = inv.company_id.currency_id
        res = []
        for line in inv.invoice_line:
            mres = self.move_line_get_item(line)
            mres['invl_id'] = line.id
            res.append(mres)
            tax_code_found = False
            taxes = line.invoice_line_tax_id.compute_all(
                (line.price_unit * (1.0 - (line.discount or 0.0) / 100.0)),
                line.quantity, line.product_id, inv.partner_id)['taxes']
            for tax in taxes:
                # Invoices use the base codes/signs, refunds the ref_* ones.
                if inv.type in ('out_invoice', 'in_invoice'):
                    tax_code_id = tax['base_code_id']
                    tax_amount = line.price_subtotal * tax['base_sign']
                else:
                    tax_code_id = tax['ref_base_code_id']
                    tax_amount = line.price_subtotal * tax['ref_base_sign']
                if tax_code_found:
                    if not tax_code_id:
                        continue
                    # Additional tax codes get a zero-priced copy of the line.
                    res.append(dict(mres))
                    res[-1]['price'] = 0.0
                    res[-1]['account_analytic_id'] = False
                elif not tax_code_id:
                    continue
                tax_code_found = True
                res[-1]['tax_code_id'] = tax_code_id
                res[-1]['tax_amount'] = currency.compute(tax_amount, company_currency)
        return res
    @api.model
    def move_line_get_item(self, line):
        """Return the base move-line dict for a single invoice line."""
        return {
            'type': 'src',
            'name': line.name.split('\n')[0][:64],
            'price_unit': line.price_unit,
            'quantity': line.quantity,
            'price': line.price_subtotal,
            'account_id': line.account_id.id,
            'product_id': line.product_id.id,
            'uos_id': line.uos_id.id,
            'account_analytic_id': line.account_analytic_id.id,
            'taxes': line.invoice_line_tax_id,
        }
    #
    # Set the tax field according to the account and the fiscal position
    #
    @api.multi
    def onchange_account_id(self, product_id, partner_id, inv_type, fposition_id, account_id):
        if not account_id:
            return {}
        unique_tax_ids = []
        account = self.env['account.account'].browse(account_id)
        if not product_id:
            # No product: take the account's own taxes, mapped by the
            # fiscal position.
            fpos = self.env['account.fiscal.position'].browse(fposition_id)
            unique_tax_ids = fpos.map_tax(account.tax_ids).ids
        else:
            # With a product, reuse the full product onchange to get taxes.
            product_change_result = self.product_id_change(product_id, False, type=inv_type,
                partner_id=partner_id, fposition_id=fposition_id, company_id=account.company_id.id)
            if 'invoice_line_tax_id' in product_change_result.get('value', {}):
                unique_tax_ids = product_change_result['value']['invoice_line_tax_id']
        return {'value': {'invoice_line_tax_id': unique_tax_ids}}
class account_invoice_tax(models.Model):
    """A tax line of an invoice, either computed from the invoice lines or
    entered manually."""
    _name = "account.invoice.tax"
    _description = "Invoice Tax"
    _order = 'sequence'
    @api.one
    @api.depends('base', 'base_amount', 'amount', 'tax_amount')
    def _compute_factors(self):
        # Ratio between the amounts booked on the base/tax codes (company
        # currency) and the invoice amounts; 1.0 when the denominator is 0.
        self.factor_base = self.base_amount / self.base if self.base else 1.0
        self.factor_tax = self.tax_amount / self.amount if self.amount else 1.0
    invoice_id = fields.Many2one('account.invoice', string='Invoice Line',
        ondelete='cascade', index=True)
    name = fields.Char(string='Tax Description',
        required=True)
    account_id = fields.Many2one('account.account', string='Tax Account',
        required=True, domain=[('type', 'not in', ['view', 'income', 'closed'])])
    account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic account')
    base = fields.Float(string='Base', digits=dp.get_precision('Account'))
    amount = fields.Float(string='Amount', digits=dp.get_precision('Account'))
    manual = fields.Boolean(string='Manual', default=True)
    sequence = fields.Integer(string='Sequence',
        help="Gives the sequence order when displaying a list of invoice tax.")
    base_code_id = fields.Many2one('account.tax.code', string='Base Code',
        help="The account basis of the tax declaration.")
    base_amount = fields.Float(string='Base Code Amount', digits=dp.get_precision('Account'),
        default=0.0)
    tax_code_id = fields.Many2one('account.tax.code', string='Tax Code',
        help="The tax basis of the tax declaration.")
    tax_amount = fields.Float(string='Tax Code Amount', digits=dp.get_precision('Account'),
        default=0.0)
    company_id = fields.Many2one('res.company', string='Company',
        related='account_id.company_id', store=True, readonly=True)
    factor_base = fields.Float(string='Multipication factor for Base code',
        compute='_compute_factors')
    factor_tax = fields.Float(string='Multipication factor Tax code',
        compute='_compute_factors')
    @api.multi
    def base_change(self, base, currency_id=False, company_id=False, date_invoice=False):
        # Onchange for 'base': convert it to company currency (scaled by the
        # base factor) to propose 'base_amount'.
        factor = self.factor_base if self else 1
        company = self.env['res.company'].browse(company_id)
        if currency_id and company.currency_id:
            currency = self.env['res.currency'].browse(currency_id)
            currency = currency.with_context(date=date_invoice or fields.Date.context_today(self))
            base = currency.compute(base * factor, company.currency_id, round=False)
        return {'value': {'base_amount': base}}
    @api.multi
    def amount_change(self, amount, currency_id=False, company_id=False, date_invoice=False):
        # Onchange for 'amount': convert it to company currency to propose
        # 'tax_amount'.
        company = self.env['res.company'].browse(company_id)
        if currency_id and company.currency_id:
            currency = self.env['res.currency'].browse(currency_id)
            currency = currency.with_context(date=date_invoice or fields.Date.context_today(self))
            amount = currency.compute(amount, company.currency_id, round=False)
        return {'value': {'tax_amount': amount}}
    @api.v8
    def compute(self, invoice):
        """Compute the tax values of *invoice*, grouped and summed by
        (tax_code, base_code, account); returns a dict keyed by that triple.
        """
        tax_grouped = {}
        currency = invoice.currency_id.with_context(date=invoice.date_invoice or fields.Date.context_today(invoice))
        company_currency = invoice.company_id.currency_id
        for line in invoice.invoice_line:
            taxes = line.invoice_line_tax_id.compute_all(
                (line.price_unit * (1 - (line.discount or 0.0) / 100.0)),
                line.quantity, line.product_id, invoice.partner_id)['taxes']
            for tax in taxes:
                val = {
                    'invoice_id': invoice.id,
                    'name': tax['name'],
                    'amount': tax['amount'],
                    'manual': False,
                    'sequence': tax['sequence'],
                    'base': currency.round(tax['price_unit'] * line['quantity']),
                }
                if invoice.type in ('out_invoice','in_invoice'):
                    val['base_code_id'] = tax['base_code_id']
                    val['tax_code_id'] = tax['tax_code_id']
                    val['base_amount'] = currency.compute(val['base'] * tax['base_sign'], company_currency, round=False)
                    val['tax_amount'] = currency.compute(val['amount'] * tax['tax_sign'], company_currency, round=False)
                    val['account_id'] = tax['account_collected_id'] or line.account_id.id
                    val['account_analytic_id'] = tax['account_analytic_collected_id']
                else:
                    # Refunds use the dedicated 'ref_*' codes and signs.
                    val['base_code_id'] = tax['ref_base_code_id']
                    val['tax_code_id'] = tax['ref_tax_code_id']
                    val['base_amount'] = currency.compute(val['base'] * tax['ref_base_sign'], company_currency, round=False)
                    val['tax_amount'] = currency.compute(val['amount'] * tax['ref_tax_sign'], company_currency, round=False)
                    val['account_id'] = tax['account_paid_id'] or line.account_id.id
                    val['account_analytic_id'] = tax['account_analytic_paid_id']
                # If the taxes generate moves on the same financial account as the invoice line
                # and no default analytic account is defined at the tax level, propagate the
                # analytic account from the invoice line to the tax line. This is necessary
                # in situations were (part of) the taxes cannot be reclaimed,
                # to ensure the tax move is allocated to the proper analytic account.
                if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id:
                    val['account_analytic_id'] = line.account_analytic_id.id
                key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
                if not key in tax_grouped:
                    tax_grouped[key] = val
                else:
                    tax_grouped[key]['base'] += val['base']
                    tax_grouped[key]['amount'] += val['amount']
                    tax_grouped[key]['base_amount'] += val['base_amount']
                    tax_grouped[key]['tax_amount'] += val['tax_amount']
        # Round the grouped amounts in the invoice currency.
        for t in tax_grouped.values():
            t['base'] = currency.round(t['base'])
            t['amount'] = currency.round(t['amount'])
            t['base_amount'] = currency.round(t['base_amount'])
            t['tax_amount'] = currency.round(t['tax_amount'])
        return tax_grouped
    @api.v7
    def compute(self, cr, uid, invoice_id, context=None):
        # Old-API shim delegating to the new-API compute() above.
        recs = self.browse(cr, uid, [], context)
        invoice = recs.env['account.invoice'].browse(invoice_id)
        return recs.compute(invoice)
    @api.model
    def move_line_get(self, invoice_id):
        """Return move-line dicts for the stored tax lines of the invoice
        (read via raw SQL), skipping lines carrying no amount at all."""
        res = []
        self._cr.execute(
            'SELECT * FROM account_invoice_tax WHERE invoice_id = %s',
            (invoice_id,)
        )
        for row in self._cr.dictfetchall():
            if not (row['amount'] or row['tax_code_id'] or row['tax_amount']):
                continue
            res.append({
                'type': 'tax',
                'name': row['name'],
                'price_unit': row['amount'],
                'quantity': 1,
                'price': row['amount'] or 0.0,
                'account_id': row['account_id'],
                'tax_code_id': row['tax_code_id'],
                'tax_amount': row['tax_amount'],
                'account_analytic_id': row['account_analytic_id'],
            })
        return res
class res_partner(models.Model):
# Inherits partner and adds invoice information in the partner form
_inherit = 'res.partner'
invoice_ids = fields.One2many('account.invoice', 'partner_id', string='Invoices',
readonly=True, copy=False)
def _find_accounting_partner(self, partner):
'''
Find the partner for which the accounting entries will be created
'''
return partner.commercial_partner_id
class mail_compose_message(models.Model):
_inherit = 'mail.compose.message'
@api.multi
def send_mail(self):
context = self._context
if context.get('default_model') == 'account.invoice' and \
context.get('default_res_id') and context.get('mark_invoice_as_sent'):
invoice = self.env['account.invoice'].browse(context['default_res_id'])
invoice = invoice.with_context(mail_post_autofollow=True)
invoice.write({'sent': True})
invoice.message_post(body=_("Invoice sent"))
return super(mail_compose_message, self).send_mail()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
goolsbys/flint-water | node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py | 1569 | 23354 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest, convert_to_binary):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except e:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the inputs .xcassets files. The
|keys| arguments is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contains an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs does not contains imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if inputs path are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or if the user specified
the PROVISIONING_PROFILE variable, only consult it) and select the most
specific that correspond to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
tigerpulma/RASdecoder | Tapaoux.py | 6 | 2292 | #!/usr/bin/env python
'''
Tapaoux Config Extractor
'''
__description__ = 'Tapaoux Config Extractor'
__author__ = 'Kevin Breen http://techanarchy.net http://malwareconfig.com'
__version__ = '0.1'
__date__ = '2015/05/11'
import re
import sys
import string
from operator import xor
from optparse import OptionParser
keys = ['Error Code', 'Last Error', '(R) Microsoft Corporation.']
marker_1 = '\xFF\xC3\x4C\xFF\xFC\xCC\x22\xCC\xAA\xAF\x32\x00\x0A\x7C\x44\x4D'
marker_2 = '\xFF\x3C\xC4\xFF\xFC\xCC\x22\xCC\xAA\xAF\x32\x00\x0A\x7C\x44\x4D'
def string_clean(line):
return ''.join((char for char in line if 32< ord(char) < 127))
def find_config(file_data):
split_data = file_data.split(marker_1)
if len(split_data) < 2:
split_data = file_data.split(marker_2)
if len(split_data) == 2:
return split_data[1][:500]
def config_decrypt(keys, data):
for enc_key in keys:
print " [-] Testing for Key: {0}".format(enc_key)
key_pointer = 0
decoded = ''
for i in range(len(data)):
if key_pointer >= len(enc_key):
key_pointer = 0
data_slice = ord(data[i])
key_slice = ord(enc_key[key_pointer])
if data_slice == key_slice or data[i].encode('hex') == '00':
decoded += data[i]
else:
decoded += chr(xor(data_slice, key_slice))
key_pointer += 1
conf_test = re.search('/[a-zA-Z0-9-]*\x2ephp', decoded)
if conf_test:
print " [-] Found Config"
return string_clean(decoded)
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile\n' + __description__, version='%prog ' + __version__)
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit()
print "[+] Reading File"
file_data = open(args[0], 'rb').read()
print " [-] Searching for config"
config_section = find_config(file_data)
if config_section == None:
print "[!] Config Not Found"
sys.exit()
dec_config = config_decrypt(keys, config_section)
print "----------------------"
print dec_config
print "----------------------"
print "[+] Complete"
| gpl-3.0 |
mmiklavc/metron | metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/metron_service.py | 9 | 23287 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
from datetime import datetime
from metron_security import kinit
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Directory, File
from resource_management.core.resources.system import Execute
from resource_management.core.source import InlineTemplate
from resource_management.core.source import Template
from resource_management.libraries.functions import format as ambari_format
from resource_management.libraries.functions.get_user_call_output import \
get_user_call_output
from resource_management.libraries.script import Script
def is_zk_configured(params):
return os.path.isfile(params.zk_configured_flag_file)
def init_zk_config(params):
Logger.info('Loading ALL Metron config into ZooKeeper - this command should ONLY be executed by Ambari on initial install.')
Execute(ambari_format(
"{metron_home}/bin/zk_load_configs.sh --zk_quorum {zookeeper_quorum} --mode PUSH --input_dir {metron_zookeeper_config_path}"),
path=ambari_format("{java_home}/bin")
)
def set_configured(user, flag_file, log_msg):
Logger.info(log_msg)
File(flag_file,
content="This file created on: " + datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
owner=user,
mode=0755)
def set_zk_configured(params):
set_configured(params.metron_user, params.zk_configured_flag_file, "Setting Zookeeper configured to true")
def solr_global_config_patches():
"""
Builds the global configuration patches required for Solr.
"""
return """
{
"op": "add",
"path": "/solr.zookeeper",
"value": "{{solr_zookeeper_url}}"
}
"""
def elasticsearch_global_config_patches():
"""
Builds the global configuration patches required for Elasticsearch.
"""
return """
{
"op": "add",
"path": "/es.clustername",
"value": "{{ es_cluster_name }}"
},
{
"op": "add",
"path": "/es.ip",
"value": "{{ es_url }}"
},
{
"op": "add",
"path": "/es.date.format",
"value": "{{es_date_format}}"
}
"""
def build_global_config_patch(params, patch_file):
"""
Build the file used to patch the global configuration.
See RFC 6902 at https://tools.ietf.org/html/rfc6902
:param params:
:param patch_file: The path where the patch file will be created.
"""
if params.ra_indexing_writer == 'Solr':
indexing_patches = solr_global_config_patches()
else:
indexing_patches = elasticsearch_global_config_patches()
other_patches = """
{
"op": "add",
"path": "/profiler.client.period.duration",
"value": "{{profiler_period_duration}}"
},
{
"op": "add",
"path": "/profiler.client.period.duration.units",
"value": "{{profiler_period_units}}"
},
{
"op": "add",
"path": "/parser.error.topic",
"value": "{{parser_error_topic}}"
},
{
"op": "add",
"path": "/enrichment.list.hbase.provider.impl",
"value": "{{enrichment_list_hbase_provider_impl}}"
},
{
"op": "add",
"path": "/enrichment.list.hbase.table",
"value": "{{enrichment_list_hbase_table}}"
},
{
"op": "add",
"path": "/enrichment.list.hbase.cf",
"value": "{{enrichment_list_hbase_cf}}"
},
{
"op": "add",
"path": "/update.hbase.table",
"value": "{{update_hbase_table}}"
},
{
"op": "add",
"path": "/update.hbase.cf",
"value": "{{update_hbase_cf}}"
},
{
"op": "add",
"path": "/user.settings.hbase.table",
"value": "{{user_settings_hbase_table}}"
},
{
"op": "add",
"path": "/user.settings.hbase.cf",
"value": "{{user_settings_hbase_cf}}"
},
{
"op": "add",
"path": "/bootstrap.servers",
"value": "{{kafka_brokers}}"
},
{
"op": "add",
"path": "/source.type.field",
"value": "{{source_type_field}}"
},
{
"op": "add",
"path": "/threat.triage.score.field",
"value": "{{threat_triage_score_field}}"
},
{
"op": "add",
"path": "/enrichment.writer.batchSize",
"value": "{{enrichment_kafka_writer_batch_size}}"
},
{
"op": "add",
"path": "/enrichment.writer.batchTimeout",
"value": "{{enrichment_kafka_writer_batch_timeout}}"
},
{
"op": "add",
"path": "/profiler.writer.batchSize",
"value": "{{profiler_kafka_writer_batch_size}}"
},
{
"op": "add",
"path": "/profiler.writer.batchTimeout",
"value": "{{profiler_kafka_writer_batch_timeout}}"
}
"""
patch_template = ambari_format(
"""
[
{indexing_patches},
{other_patches}
]
""")
File(patch_file,
content=InlineTemplate(patch_template),
owner=params.metron_user,
group=params.metron_group)
def patch_global_config(params):
patch_file = "/tmp/metron-global-config-patch.json"
Logger.info("Setup temporary global config JSON patch (formatting per RFC6902): " + patch_file)
build_global_config_patch(params, patch_file)
Logger.info('Patching global config in ZooKeeper')
Execute(ambari_format(
"{metron_home}/bin/zk_load_configs.sh --zk_quorum {zookeeper_quorum} --mode PATCH --config_type GLOBAL --patch_file " + patch_file),
path=ambari_format("{java_home}/bin")
)
Logger.info("Done patching global config")
def pull_config(params):
Logger.info('Pulling all Metron configs down from ZooKeeper to local file system')
Logger.info('NOTE - THIS IS OVERWRITING THE LOCAL METRON CONFIG DIR WITH ZOOKEEPER CONTENTS: ' + params.metron_zookeeper_config_path)
Execute(ambari_format(
"{metron_home}/bin/zk_load_configs.sh --zk_quorum {zookeeper_quorum} --mode PULL --output_dir {metron_zookeeper_config_path} --force"),
path=ambari_format("{java_home}/bin")
)
def refresh_configs(params):
if not is_zk_configured(params):
Logger.warning("The expected flag file '" + params.zk_configured_flag_file + "'indicating that Zookeeper has been configured does not exist. Skipping patching. An administrator should look into this.")
return
check_indexer_parameters()
patch_global_config(params)
pull_config(params)
def get_running_topologies(params):
Logger.info('Getting Running Storm Topologies from Storm REST Server')
Logger.info('Security enabled? ' + str(params.security_enabled))
# Want to sudo to the metron user and kinit as them so we aren't polluting root with Metron's Kerberos tickets.
# This is becuase we need to run a command with a return as the metron user. Sigh
negotiate = '--negotiate -u : ' if params.security_enabled else ''
cmd = ambari_format(
'curl --max-time 3 ' + negotiate + '{storm_rest_addr}/api/v1/topology/summary')
if params.security_enabled:
kinit(params.kinit_path_local,
params.metron_keytab_path,
params.metron_principal_name,
execute_user=params.metron_user)
Logger.info('Running cmd: ' + cmd)
return_code, stdout, stderr = get_user_call_output(cmd,
user=params.metron_user,
is_checked_call=False)
if (return_code != 0):
return {}
try:
stormjson = json.loads(stdout)
except ValueError, e:
Logger.info('Stdout: ' + str(stdout))
Logger.info('Stderr: ' + str(stderr))
Logger.exception(str(e))
return {}
topologiesDict = {}
for topology in stormjson['topologies']:
topologiesDict[topology['name']] = topology['status']
Logger.info("Topologies: " + str(topologiesDict))
return topologiesDict
def init_kafka_topics(params, topics):
    """
    Create the given Kafka topics, skipping any that already exist.

    :param params: Ambari params object (kafka_bin_dir, zookeeper_quorum,
                   metron_topic_retention, kafka_user).
    :param topics: A list of topic names to create.
    """
    Logger.info('Creating Kafka topics')

    # Create the topics. All the components need indexing (for errors), so we pass '--if-not-exists'.
    command_template = """{0}/kafka-topics.sh \
                                --zookeeper {1} \
                                --create \
                                --if-not-exists \
                                --topic {2} \
                                --partitions {3} \
                                --replication-factor {4} \
                                --config retention.bytes={5}"""

    num_partitions = 1
    replication_factor = 1
    # retention parameter is expressed in gigabytes; the broker wants bytes
    retention_gigabytes = int(params.metron_topic_retention)
    retention_bytes = retention_gigabytes * 1024 * 1024 * 1024
    for topic in topics:
        # Fixed log message: previously rendered as "Creating topic'<name>'"
        # with no space before the quote.
        Logger.info("Creating topic '{0}'".format(topic))
        Execute(command_template.format(params.kafka_bin_dir,
                                        params.zookeeper_quorum,
                                        topic,
                                        num_partitions,
                                        replication_factor,
                                        retention_bytes),
                user=params.kafka_user, tries=3, try_sleep=5, logoutput=True)
    Logger.info("Done creating Kafka topics")
def init_kafka_acls(params, topics):
    """
    Grant the Metron user access to each of the given Kafka topics.

    :param params: Ambari params object.
    :param topics: A list of topic names.
    """
    Logger.info('Creating Kafka topic ACLs')
    acl_template = """{0}/kafka-acls.sh \
                                  --authorizer kafka.security.auth.SimpleAclAuthorizer \
                                  --authorizer-properties zookeeper.connect={1} \
                                  --add \
                                  --allow-principal User:{2} \
                                  --topic {3}"""

    for topic_name in topics:
        Logger.info("Creating ACL for topic '{0}'".format(topic_name))
        acl_cmd = acl_template.format(params.kafka_bin_dir,
                                      params.zookeeper_quorum,
                                      params.metron_user,
                                      topic_name)
        Execute(acl_cmd, user=params.kafka_user, tries=3, try_sleep=5, logoutput=True)
def init_kafka_acl_groups(params, groups):
    """
    Grant the Metron user access to each of the given Kafka consumer groups.

    :param params: Ambari params object.
    :param groups: A list of consumer group names.
    """
    Logger.info('Creating Kafka group ACLs')
    acl_template = """{0}/kafka-acls.sh \
                                  --authorizer kafka.security.auth.SimpleAclAuthorizer \
                                  --authorizer-properties zookeeper.connect={1} \
                                  --add \
                                  --allow-principal User:{2} \
                                  --group {3}"""

    for group_name in groups:
        Logger.info("Creating ACL for group '{0}'".format(group_name))
        acl_cmd = acl_template.format(params.kafka_bin_dir,
                                      params.zookeeper_quorum,
                                      params.metron_user,
                                      group_name)
        Execute(acl_cmd, user=params.kafka_user, tries=3, try_sleep=5, logoutput=True)
def execute(cmd, user, err_msg=None, tries=3, try_sleep=5, logoutput=True, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'):
    """
    Executes a command and raises an appropriate error message if the command
    fails.

    :param cmd: The command to execute.
    :param user: The user to execute the command as.
    :param err_msg: The error message to display if the command fails.
    :param tries: The number of attempts to execute the command.
    :param try_sleep: The time between attempts.
    :param logoutput: If true, log the command output.
    :param path: The path to use when running the command.
    :raises Fail: when the command fails after all retry attempts.
    """
    try:
        Execute(cmd, tries=tries, try_sleep=try_sleep, logoutput=logoutput, user=user, path=path)
    except Exception:
        # Catch Exception rather than using a bare 'except' so that
        # system-exiting exceptions (KeyboardInterrupt, SystemExit) propagate
        # instead of being converted into a Fail.
        if err_msg is None:
            err_msg = "Execution failed: cmd={0}, user={1}".format(cmd, user)
        raise Fail(err_msg)
def check_kafka_topics(params, topics):
    """
    Validates that the Kafka topics exist. An exception is raised if any of the
    topics do not exist.
    :param params: Ambari params object.
    :param topics: A list of topic names.
    :raises Fail: (via execute) when a topic is missing.
    """
    # if needed kinit as 'metron'
    if params.security_enabled:
        kinit(params.kinit_path_local,
              params.metron_keytab_path,
              params.metron_principal_name,
              execute_user=params.metron_user)

    # the awk program exits 0 when at least one listed topic matches the
    # pattern and 1 otherwise, so execute() raises err_msg on a miss
    template = """{0}/kafka-topics.sh \
        --zookeeper {1} \
        --list | \
        awk 'BEGIN {{cnt=0;}} /{2}/ {{cnt++}} END {{if (cnt > 0) {{exit 0}} else {{exit 1}}}}'"""

    for topic in topics:
        Logger.info("Checking existence of Kafka topic '{0}'".format(topic))
        cmd = template.format(params.kafka_bin_dir, params.zookeeper_quorum, topic)
        err_msg = "Missing Kafka topic; topic={0}".format(topic)
        execute(cmd, user=params.kafka_user, err_msg=err_msg)
def create_hbase_table(params, table, cf):
    """
    Creates an HBase table, if the table does not currently exist.
    Runs as the HBase user (kinit'ing first when security is enabled).
    :param params: Ambari params object.
    :param table: The name of the HBase table.
    :param cf: The column family.
    """
    if params.security_enabled:
        kinit(params.kinit_path_local,
              params.hbase_keytab_path,
              params.hbase_principal_name,
              execute_user=params.hbase_user)

    # only issue 'create' when the 'exists' probe reports the table is missing
    cmd = """if [[ $(echo \"exists '{0}'\" | hbase shell | grep 'not exist') ]]; \
    then echo \"create '{0}','{1}'\" | hbase shell -n; fi"""
    add_update_cmd = cmd.format(table, cf)
    Execute(add_update_cmd,
            tries=3,
            try_sleep=5,
            logoutput=False,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
            user=params.hbase_user
            )
def check_hbase_table(params, table):
    """
    Validate that an HBase table exists; raises (via execute) when it does
    not.

    :param params: Ambari params object.
    :param table: The name of the HBase table.
    """
    Logger.info("Checking HBase table '{0}'".format(table))

    # if needed kinit as 'hbase'
    if params.security_enabled:
        kinit(params.kinit_path_local,
              params.hbase_keytab_path,
              params.hbase_principal_name,
              execute_user=params.hbase_user)

    shell_probe = "echo \"exists '{0}'\" | hbase shell -n | grep 'Table {1} does exist'"
    execute(shell_probe.format(table, table),
            user=params.hbase_user,
            err_msg="Missing HBase table; table={0}".format(table))
def check_hbase_column_family(params, table, column_family):
    """
    Validate that an HBase column family exists; raises (via execute) when
    the column family is missing.

    :param params: Ambari params object.
    :param table: The name of the HBase table.
    :param column_family: The name of the HBase column family.
    """
    Logger.info("Checking column family '{0}:{1}'".format(table, column_family))

    # if needed kinit as 'hbase'
    if params.security_enabled:
        kinit(params.kinit_path_local,
              params.hbase_keytab_path,
              params.hbase_principal_name,
              execute_user=params.hbase_user)

    describe_probe = "echo \"desc '{0}'\" | hbase shell -n | grep \"NAME => '{1}'\""
    execute(describe_probe.format(table, column_family),
            user=params.hbase_user,
            err_msg="Missing HBase column family; table={0}, cf={1}".format(table, column_family))
def check_hbase_acls(params, table, user=None, permissions="READ,WRITE"):
    """
    Validates that HBase table permissions exist for a user. An exception is
    raised if the permissions do not exist.
    :param params: Ambari params object.
    :param table: The name of the HBase table.
    :param user: The name of the user.  Defaults to the Metron user.
    :param permissions: The permissions that should exist.
    """
    if user is None:
        user = params.metron_user
    Logger.info("Checking HBase ACLs; table={0}, user={1}, permissions={2}".format(table, user, permissions))

    # if needed kinit as 'hbase'
    if params.security_enabled:
        kinit(params.kinit_path_local,
              params.hbase_keytab_path,
              params.hbase_principal_name,
              execute_user=params.hbase_user)

    # grep for a permission line naming the user with exactly these actions;
    # a miss makes the pipeline exit non-zero and execute() raise err_msg
    template = """echo "user_permission '{0}'" | \
      hbase shell -n | \
      grep " {1} " | \
      grep "actions={2}"
    """

    cmd = template.format(table, user, permissions)
    err_msg = "Missing HBase access; table={0}, user={1}, permissions={2}".format(table, user, permissions)
    execute(cmd, user=params.hbase_user, err_msg=err_msg)
def check_hdfs_dir_exists(params, path, user=None):
    """
    Validate that a directory exists in HDFS.
    :param params: Ambari params object.
    :param path: The directory path in HDFS.
    :param user: The user to execute the check under.  Defaults to the
        Metron user.
    """
    if user is None:
        user = params.metron_user
    Logger.info("Checking HDFS; directory={0} user={1}".format(path, user))

    # if needed kinit as 'metron'
    if params.security_enabled:
        kinit(params.kinit_path_local,
              params.metron_keytab_path,
              params.metron_principal_name,
              execute_user=params.metron_user)

    # 'hdfs dfs -test -d' exits non-zero when the directory is absent
    template = "{0}/hdfs dfs -test -d {1}"
    cmd = template.format(params.hadoop_bin_dir, path)
    err_msg = "Missing directory in HDFS: directory={0} user={1}".format(path, user)
    # Run the check as the requested 'user'; it was previously hard-coded to
    # params.metron_user, silently ignoring the 'user' argument and diverging
    # from check_hdfs_file_exists.
    execute(cmd, user=user, err_msg=err_msg)
def check_hdfs_file_exists(params, path, user=None):
    """
    Validate that a file exists in HDFS.
    :param params: Ambari params object.
    :param path: The file path in HDFS.
    :param user: The user to execute the check under.  Defaults to the
        Metron user.
    """
    if user is None:
        user = params.metron_user
    Logger.info("Checking HDFS; file={0}, user={1}".format(path, user))

    # if needed kinit as 'metron'
    if params.security_enabled:
        kinit(params.kinit_path_local,
              params.metron_keytab_path,
              params.metron_principal_name,
              execute_user=params.metron_user)

    # 'hdfs dfs -test -f' exits non-zero when the file is absent
    template = "{0}/hdfs dfs -test -f {1}"
    cmd = template.format(params.hadoop_bin_dir, path)
    err_msg = "Missing file in HDFS; file={0}".format(path)
    execute(cmd, user=user, err_msg=err_msg)
def check_kafka_acls(params, topics, user=None):
    """
    Validate that permissions have been granted for a list of Kafka topics.
    :param params: Ambari params object.
    :param topics: A list of topic names.
    :param user: The user whose access is checked.  Defaults to the Metron
        user.
    """
    if user is None:
        user = params.metron_user

    # grep exits non-zero when no ACL line mentions the user, which makes
    # execute() raise with err_msg
    template = """{0}/kafka-acls.sh \
        --authorizer kafka.security.auth.SimpleAclAuthorizer \
        --authorizer-properties zookeeper.connect={1} \
        --topic {2} \
        --list | grep 'User:{3}'"""

    for topic in topics:
        Logger.info("Checking ACL; topic={0}, user={1}'".format(topic, user))
        cmd = template.format(params.kafka_bin_dir, params.zookeeper_quorum, topic, user)
        err_msg = "Missing Kafka access; topic={0}, user={1}".format(topic, user)
        execute(cmd, user=params.kafka_user, err_msg=err_msg)
def check_kafka_acl_groups(params, groups, user=None):
    """
    Validate that Kafka group permissions have been granted.
    :param params: Ambari params object.
    :param groups: A list of group names.
    :param user: The user whose access is checked.  Defaults to the Metron
        user.
    """
    if user is None:
        user = params.metron_user

    # grep exits non-zero when no ACL line mentions the user, which makes
    # execute() raise with err_msg
    template = """{0}/kafka-acls.sh \
        --authorizer kafka.security.auth.SimpleAclAuthorizer \
        --authorizer-properties zookeeper.connect={1} \
        --group {2} \
        --list | grep 'User:{3}'"""

    for group in groups:
        # Fixed log message: it previously said "topic" while reporting a
        # group; now consistent with check_kafka_acls.
        Logger.info("Checking ACL; group={0}, user={1}".format(group, user))
        cmd = template.format(params.kafka_bin_dir, params.zookeeper_quorum, group, user)
        err_msg = "Missing Kafka group access; group={0}, user={1}".format(group, user)
        execute(cmd, user=params.kafka_user, err_msg=err_msg)
def check_http(host, port, user):
    """
    Check for a valid HTTP response.
    :param host: The hostname.
    :param port: The port number.
    :param user: The user to execute the HTTP request as.
    :raises ComponentIsNotRunning: when the request fails after retries.
    """
    cmd = "curl -sS --max-time 3 {0}:{1}".format(host, port)
    Logger.info('Checking HTTP connectivity; host={0}, port={1}, user={2} cmd={3}'.format(host, port, user, cmd))
    try:
        Execute(cmd, tries=3, try_sleep=5, logoutput=False, user=user)
    except:
        raise ComponentIsNotRunning()
def check_indexer_parameters():
    """
    Ensure that all required parameters have been defined for the chosen
    Indexer; either Solr or Elasticsearch.
    """
    config = Script.get_config()
    metron_env = config['configurations']['metron-env']
    indexer = config['configurations']['metron-indexing-env']['ra_indexing_writer']
    Logger.info('Checking parameters for indexer = ' + indexer)

    # required metron-env properties per indexer type
    if indexer == 'Solr':
        required = ["solr_zookeeper_url"]
    else:
        required = ["es_cluster_name", "es_hosts", "es_date_format"]

    missing = ["metron-env/" + name for name in required if not metron_env[name]]
    if missing:
        raise Fail("Missing required indexing parameters(s): indexer={0}, missing={1}".format(indexer, missing))
def install_metron_knox(params):
    """
    Install the Metron Knox integration when Knox is present on this host,
    then record the installation by writing the flag file.
    :param params: Ambari params object.
    """
    if os.path.exists(params.knox_home):
        # the install script reads its settings from environment variables,
        # which are unset again after it runs
        template = """export KNOX_HOME={0}; \
        export KNOX_USER={1}; \
        export KNOX_GROUP={2}; \
        {3}/bin/install_metron_knox.sh; \
        unset KNOX_USER; \
        unset KNOX_GROUP; \
        unset KNOX_HOME;"""
        cmd = template.format(params.knox_home, params.knox_user, params.knox_group, params.metron_home)
        Execute(cmd)
        set_metron_knox_installed(params)
def is_metron_knox_installed(params):
    """Return True when the Metron Knox 'installed' flag file is present."""
    flag_file = params.metron_knox_installed_flag_file
    return os.path.isfile(flag_file)
def set_metron_knox_installed(params):
    """
    Record that the Metron Knox integration has been installed.
    Ensures the ZooKeeper config directory exists with the proper ownership,
    then writes the 'installed' flag file.
    :param params: Ambari params object.
    """
    Directory(params.metron_zookeeper_config_path,
              mode=0755,
              owner=params.metron_user,
              group=params.metron_group,
              create_parents=True
              )
    set_configured(params.metron_user, params.metron_knox_installed_flag_file, "Setting Metron Knox installed to true")
def metron_knox_topology_setup(params):
    """Render the Metron Knox topology files when Knox is present on this host."""
    if not os.path.exists(params.knox_home):
        return

    # destination (ambari_format pattern) -> jinja template used to render it
    topologies = (
        ("{knox_home}/conf/topologies/metron.xml", "metron.xml.j2"),
        ("{knox_home}/conf/topologies/metronsso.xml", "metronsso.xml.j2"),
    )
    for dest, template_name in topologies:
        File(ambari_format(dest),
             content=Template(template_name),
             owner=params.knox_user,
             group=params.knox_group)
| apache-2.0 |
florianpaquet/sublime-sync | requests/packages/charade/jpcntx.py | 151 | 19323 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
# number of frequency categories used in the jp2CharContext table below
NUM_OF_CATEGORY = 6
# sentinel confidence returned by get_confidence() before enough data is seen
DONT_KNOW = -1
# sequences needed before got_enough_data() reports True
ENOUGH_REL_THRESHOLD = 100
# feed() marks detection done after this many sequences
MAX_REL_THRESHOLD = 1000
# minimum sequences before get_confidence() returns a real value
MINIMUM_DATA_THRESHOLD = 4
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
    """
    Tallies the frequency category of consecutive hiragana pairs (via the
    jp2CharContext table) to gauge how likely a byte stream is Japanese text.
    Encoding-specific subclasses override get_order() to map raw bytes to a
    hiragana index and character length.
    """
    def __init__(self):
        self.reset()

    def reset(self):
        self._mTotalRel = 0  # total sequence received
        # category counters, each integer counts sequences in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1  # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False

    def feed(self, aBuf, aLen):
        """Consume aLen bytes of aBuf, tallying hiragana pair frequencies."""
        if self._mDone:
            return

        # The buffer we got is byte oriented, and a character may span more
        # than one buffer.  In case the last one or two bytes of the previous
        # buffer were not a complete character, we recorded how many bytes are
        # needed to complete it and skip those bytes here.  We could record
        # those bytes as well and analyse the character once it is complete,
        # but since one character will not make much difference, simply
        # skipping it simplifies the logic and improves performance.
        i = self._mNeedToSkipCharNum
        while i < aLen:
            order, charLen = self.get_order(aBuf[i:i + 2])
            i += charLen
            if i > aLen:
                # character straddles the buffer boundary; skip its tail on
                # the next feed() and forget the pair context
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        self._mDone = True
                        break
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order

    def got_enough_data(self):
        # True once enough sequences have been seen to trust get_confidence()
        return self._mTotalRel > ENOUGH_REL_THRESHOLD

    def get_confidence(self):
        """Return the fraction of sequences outside category 0, or DONT_KNOW
        when too little data has been seen."""
        # This is just one way to calculate confidence. It works well for me.
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
        else:
            return DONT_KNOW

    def get_order(self, aBuf):
        # base class has no encoding knowledge: everything is non-hiragana,
        # one byte long
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    """Japanese context analysis for Shift-JIS encoded byte streams."""
    def get_order(self, aBuf):
        """Return (hiragana_order, char_len) for the character starting at
        aBuf[0]; order is -1 when the character is not hiragana."""
        if not aBuf:
            return -1, 1
        # find out current char's byte length
        first_char = wrap_ord(aBuf[0])
        if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
            charLen = 2
        else:
            charLen = 1

        # return its order if it is hiragana
        if len(aBuf) > 1:
            second_char = wrap_ord(aBuf[1])
            # NOTE(review): 202 (0xCA) as the hiragana lead byte matches
            # upstream chardet/charade, but Shift-JIS hiragana normally leads
            # with 0x82 -- verify against upstream before changing.
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, charLen
        return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """Japanese context analysis for EUC-JP encoded byte streams."""
    def get_order(self, aBuf):
        """Return (hiragana_order, char_len) for the character starting at
        aBuf[0]; order is -1 when the character is not hiragana."""
        if not aBuf:
            return -1, 1

        lead = wrap_ord(aBuf[0])
        # determine the byte length of the current character from its lead byte
        if lead == 0x8F:
            char_len = 3
        elif lead == 0x8E or 0xA1 <= lead <= 0xFE:
            char_len = 2
        else:
            char_len = 1

        # hiragana: lead byte 0xA4 with trail byte in [0xA1, 0xF3]
        if lead == 0xA4 and len(aBuf) > 1:
            trail = wrap_ord(aBuf[1])
            if 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, char_len

        return -1, char_len
# flake8: noqa
| mit |
hdinsight/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/client_base.py | 31 | 32142 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""Contains classes and functions that a SAML2.0 Service Provider (SP) may use
to conclude its tasks.
"""
import threading
from urllib import urlencode
from urlparse import urlparse
from saml2.entity import Entity
from saml2.mdstore import destinations
from saml2.profile import paos, ecp
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.samlp import AuthnQuery, RequestedAuthnContext
from saml2.samlp import NameIDMappingRequest
from saml2.samlp import AttributeQuery
from saml2.samlp import AuthzDecisionQuery
from saml2.samlp import AuthnRequest
import saml2
import time
from saml2.soap import make_soap_enveloped_saml_thingy
from urlparse import parse_qs
from saml2.s_utils import signature, UnravelError
from saml2.s_utils import do_attributes
from saml2 import samlp, BINDING_SOAP, SAMLError
from saml2 import saml
from saml2 import soap
from saml2.population import Population
from saml2.response import AttributeResponse, StatusError
from saml2.response import AuthzResponse
from saml2.response import AssertionIDResponse
from saml2.response import AuthnQueryResponse
from saml2.response import NameIDMappingResponse
from saml2.response import AuthnResponse
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_PAOS
import logging
logger = logging.getLogger(__name__)
SSO_BINDING = saml2.BINDING_HTTP_REDIRECT
FORM_SPEC = """<form method="post" action="%s">
<input type="hidden" name="SAMLRequest" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
<input type="submit" value="Submit" />
</form>"""
LAX = False
ECP_SERVICE = "urn:oasis:names:tc:SAML:2.0:profiles:SSO:ecp"
ACTOR = "http://schemas.xmlsoap.org/soap/actor/next"
MIME_PAOS = "application/vnd.paos+xml"
class IdpUnspecified(SAMLError):
pass
class VerifyError(SAMLError):
pass
class LogoutError(SAMLError):
pass
class NoServiceDefined(SAMLError):
pass
class Base(Entity):
""" The basic pySAML2 service provider class """
def __init__(self, config=None, identity_cache=None, state_cache=None,
virtual_organization="", config_file=""):
"""
:param config: A saml2.config.Config instance
:param identity_cache: Where the class should store identity information
:param state_cache: Where the class should keep state information
:param virtual_organization: A specific virtual organization
"""
Entity.__init__(self, "sp", config, config_file, virtual_organization)
self.users = Population(identity_cache)
self.lock = threading.Lock()
# for server state storage
if state_cache is None:
self.state = {} # in memory storage
else:
self.state = state_cache
self.logout_requests_signed = False
self.allow_unsolicited = False
self.authn_requests_signed = False
self.want_assertions_signed = False
self.want_response_signed = False
for foo in ["allow_unsolicited", "authn_requests_signed",
"logout_requests_signed", "want_assertions_signed",
"want_response_signed"]:
v = self.config.getattr(foo, "sp")
if v is True or v == 'true':
setattr(self, foo, True)
self.artifact2response = {}
#
# Private methods
#
def _relay_state(self, session_id):
vals = [session_id, str(int(time.time()))]
if self.config.secret is None:
vals.append(signature("", vals))
else:
vals.append(signature(self.config.secret, vals))
return "|".join(vals)
def _sso_location(self, entityid=None, binding=BINDING_HTTP_REDIRECT):
if entityid:
# verify that it's in the metadata
srvs = self.metadata.single_sign_on_service(entityid, binding)
if srvs:
return destinations(srvs)[0]
else:
logger.info("_sso_location: %s, %s" % (entityid, binding))
raise IdpUnspecified("No IdP to send to given the premises")
# get the idp location from the metadata. If there is more than one
# IdP in the configuration raise exception
eids = self.metadata.with_descriptor("idpsso")
if len(eids) > 1:
raise IdpUnspecified("Too many IdPs to choose from: %s" % eids)
try:
srvs = self.metadata.single_sign_on_service(eids.keys()[0], binding)
return destinations(srvs)[0]
except IndexError:
raise IdpUnspecified("No IdP to send to given the premises")
def _my_name(self):
return self.config.name
#
# Public API
#
def add_vo_information_about_user(self, name_id):
""" Add information to the knowledge I have about the user. This is
for Virtual organizations.
:param name_id: The subject identifier
:return: A possibly extended knowledge.
"""
ava = {}
try:
(ava, _) = self.users.get_identity(name_id)
except KeyError:
pass
# is this a Virtual Organization situation
if self.vorg:
if self.vorg.do_aggregation(name_id):
# Get the extended identity
ava = self.users.get_identity(name_id)[0]
return ava
#noinspection PyUnusedLocal
@staticmethod
def is_session_valid(_session_id):
""" Place holder. Supposed to check if the session is still valid.
"""
return True
def service_urls(self, binding=BINDING_HTTP_POST):
_res = self.config.endpoint("assertion_consumer_service", binding, "sp")
if _res:
return _res
else:
return None
def create_authn_request(self, destination, vorg="", scoping=None,
binding=saml2.BINDING_HTTP_POST,
nameid_format=None,
service_url_binding=None, message_id=0,
consent=None, extensions=None, sign=None,
allow_create=False, sign_prepare=False, **kwargs):
""" Creates an authentication request.
:param destination: Where the request should be sent.
:param vorg: The virtual organization the service belongs to.
:param scoping: The scope of the request
:param binding: The protocol to use for the Response !!
:param nameid_format: Format of the NameID
:param service_url_binding: Where the reply should be sent dependent
on reply binding.
:param message_id: The identifier for this request
:param consent: Whether the principal have given her consent
:param extensions: Possible extensions
:param sign: Whether the request should be signed or not.
:param sign_prepare: Whether the signature should be prepared or not.
:param allow_create: If the identity provider is allowed, in the course
of fulfilling the request, to create a new identifier to represent
the principal.
:param kwargs: Extra key word arguments
:return: tuple of request ID and <samlp:AuthnRequest> instance
"""
client_crt = None
if "client_crt" in kwargs:
client_crt = kwargs["client_crt"]
args = {}
try:
args["assertion_consumer_service_url"] = kwargs[
"assertion_consumer_service_urls"][0]
del kwargs["assertion_consumer_service_urls"]
except KeyError:
try:
args["assertion_consumer_service_url"] = kwargs[
"assertion_consumer_service_url"]
del kwargs["assertion_consumer_service_url"]
except KeyError:
try:
args["attribute_consuming_service_index"] = str(kwargs[
"attribute_consuming_service_index"])
del kwargs["attribute_consuming_service_index"]
except KeyError:
if service_url_binding is None:
service_urls = self.service_urls(binding)
else:
service_urls = self.service_urls(service_url_binding)
args["assertion_consumer_service_url"] = service_urls[0]
try:
args["provider_name"] = kwargs["provider_name"]
except KeyError:
if binding == BINDING_PAOS:
pass
else:
args["provider_name"] = self._my_name()
# Allow argument values either as class instances or as dictionaries
# all of these have cardinality 0..1
_msg = AuthnRequest()
for param in ["scoping", "requested_authn_context", "conditions",
"subject", "scoping"]:
try:
_item = kwargs[param]
except KeyError:
pass
else:
del kwargs[param]
# either class instance or argument dictionary
if isinstance(_item, _msg.child_class(param)):
args[param] = _item
elif isinstance(_item, dict):
args[param] = RequestedAuthnContext(**_item)
else:
raise ValueError("%s or wrong type expected %s" % (_item,
param))
try:
args["name_id_policy"] = kwargs["name_id_policy"]
del kwargs["name_id_policy"]
except KeyError:
if allow_create:
allow_create = "true"
else:
allow_create = "false"
if nameid_format == "":
name_id_policy = None
else:
if nameid_format is None:
nameid_format = self.config.getattr("name_id_format", "sp")
if nameid_format is None:
nameid_format = NAMEID_FORMAT_TRANSIENT
elif isinstance(nameid_format, list):
# NameIDPolicy can only have one format specified
nameid_format = nameid_format[0]
name_id_policy = samlp.NameIDPolicy(allow_create=allow_create,
format=nameid_format)
if name_id_policy and vorg:
try:
name_id_policy.sp_name_qualifier = vorg
name_id_policy.format = saml.NAMEID_FORMAT_PERSISTENT
except KeyError:
pass
args["name_id_policy"] = name_id_policy
try:
nsprefix = kwargs["nsprefix"]
except KeyError:
nsprefix = None
if kwargs:
_args, extensions = self._filter_args(AuthnRequest(), extensions,
**kwargs)
args.update(_args)
try:
del args["id"]
except KeyError:
pass
if sign is None:
sign = self.authn_requests_signed
if (sign and self.sec.cert_handler.generate_cert()) or \
client_crt is not None:
with self.lock:
self.sec.cert_handler.update_cert(True, client_crt)
if client_crt is not None:
sign_prepare = True
return self._message(AuthnRequest, destination, message_id,
consent, extensions, sign, sign_prepare,
protocol_binding=binding,
scoping=scoping, nsprefix=nsprefix, **args)
return self._message(AuthnRequest, destination, message_id, consent,
extensions, sign, sign_prepare,
protocol_binding=binding,
scoping=scoping, nsprefix=nsprefix, **args)
def create_attribute_query(self, destination, name_id=None,
attribute=None, message_id=0, consent=None,
extensions=None, sign=False, sign_prepare=False,
**kwargs):
""" Constructs an AttributeQuery
:param destination: To whom the query should be sent
:param name_id: The identifier of the subject
:param attribute: A dictionary of attributes and values that is
asked for. The key are one of 4 variants:
3-tuple of name_format,name and friendly_name,
2-tuple of name_format and name,
1-tuple with name or
just the name as a string.
:param sp_name_qualifier: The unique identifier of the
service provider or affiliation of providers for whom the
identifier was generated.
:param name_qualifier: The unique identifier of the identity
provider that generated the identifier.
:param format: The format of the name ID
:param message_id: The identifier of the session
:param consent: Whether the principal have given her consent
:param extensions: Possible extensions
:param sign: Whether the query should be signed or not.
:param sign_prepare: Whether the Signature element should be added.
:return: Tuple of request ID and an AttributeQuery instance
"""
if name_id is None:
if "subject_id" in kwargs:
name_id = saml.NameID(text=kwargs["subject_id"])
for key in ["sp_name_qualifier", "name_qualifier",
"format"]:
try:
setattr(name_id, key, kwargs[key])
except KeyError:
pass
else:
raise AttributeError("Missing required parameter")
elif isinstance(name_id, basestring):
name_id = saml.NameID(text=name_id)
for key in ["sp_name_qualifier", "name_qualifier", "format"]:
try:
setattr(name_id, key, kwargs[key])
except KeyError:
pass
subject = saml.Subject(name_id=name_id)
if attribute:
attribute = do_attributes(attribute)
try:
nsprefix = kwargs["nsprefix"]
except KeyError:
nsprefix = None
return self._message(AttributeQuery, destination, message_id, consent,
extensions, sign, sign_prepare, subject=subject,
attribute=attribute, nsprefix=nsprefix)
# MUST use SOAP for
# AssertionIDRequest, SubjectQuery,
# AuthnQuery, AttributeQuery, or AuthzDecisionQuery
def create_authz_decision_query(self, destination, action,
evidence=None, resource=None, subject=None,
message_id=0, consent=None, extensions=None,
sign=None, **kwargs):
""" Creates an authz decision query.
:param destination: The IdP endpoint
:param action: The action you want to perform (has to be at least one)
:param evidence: Why you should be able to perform the action
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance
"""
return self._message(AuthzDecisionQuery, destination, message_id,
consent, extensions, sign, action=action,
evidence=evidence, resource=resource,
subject=subject, **kwargs)
def create_authz_decision_query_using_assertion(self, destination,
assertion, action=None,
resource=None,
subject=None, message_id=0,
consent=None,
extensions=None,
sign=False, nsprefix=None):
""" Makes an authz decision query based on a previously received
Assertion.
:param destination: The IdP endpoint to send the request to
:param assertion: An Assertion instance
:param action: The action you want to perform (has to be at least one)
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance
"""
if action:
if isinstance(action, basestring):
_action = [saml.Action(text=action)]
else:
_action = [saml.Action(text=a) for a in action]
else:
_action = None
return self.create_authz_decision_query(
destination, _action, saml.Evidence(assertion=assertion),
resource, subject, message_id=message_id, consent=consent,
extensions=extensions, sign=sign, nsprefix=nsprefix)
@staticmethod
def create_assertion_id_request(assertion_id_refs, **kwargs):
"""
:param assertion_id_refs:
:return: One ID ref
"""
if isinstance(assertion_id_refs, basestring):
return 0, assertion_id_refs
else:
return 0, assertion_id_refs[0]
def create_authn_query(self, subject, destination=None, authn_context=None,
session_index="", message_id=0, consent=None,
extensions=None, sign=False, nsprefix=None):
"""
:param subject: The subject its all about as a <Subject> instance
:param destination: The IdP endpoint to send the request to
:param authn_context: list of <RequestedAuthnContext> instances
:param session_index: a specified session index
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return:
"""
return self._message(AuthnQuery, destination, message_id, consent,
extensions, sign, subject=subject,
session_index=session_index,
requested_authn_context=authn_context,
nsprefix=nsprefix)
def create_name_id_mapping_request(self, name_id_policy,
name_id=None, base_id=None,
encrypted_id=None, destination=None,
message_id=0, consent=None,
extensions=None, sign=False,
nsprefix=None):
"""
:param name_id_policy:
:param name_id:
:param base_id:
:param encrypted_id:
:param destination:
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return:
"""
# One of them must be present
assert name_id or base_id or encrypted_id
if name_id:
return self._message(NameIDMappingRequest, destination, message_id,
consent, extensions, sign,
name_id_policy=name_id_policy, name_id=name_id,
nsprefix=nsprefix)
elif base_id:
return self._message(NameIDMappingRequest, destination, message_id,
consent, extensions, sign,
name_id_policy=name_id_policy, base_id=base_id,
nsprefix=nsprefix)
else:
return self._message(NameIDMappingRequest, destination, message_id,
consent, extensions, sign,
name_id_policy=name_id_policy,
encrypted_id=encrypted_id, nsprefix=nsprefix)
# ======== response handling ===========
def parse_authn_request_response(self, xmlstr, binding, outstanding=None,
outstanding_certs=None):
""" Deal with an AuthnResponse
:param xmlstr: The reply as a xml string
:param binding: Which binding that was used for the transport
:param outstanding: A dictionary with session IDs as keys and
the original web request from the user before redirection
as values.
:return: An response.AuthnResponse or None
"""
try:
_ = self.config.entityid
except KeyError:
raise SAMLError("Missing entity_id specification")
resp = None
if xmlstr:
kwargs = {
"outstanding_queries": outstanding,
"outstanding_certs": outstanding_certs,
"allow_unsolicited": self.allow_unsolicited,
"want_assertions_signed": self.want_assertions_signed,
"want_response_signed": self.want_response_signed,
"return_addrs": self.service_urls(binding=binding),
"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters,
"allow_unknown_attributes":
self.config.allow_unknown_attributes,
}
try:
resp = self._parse_response(xmlstr, AuthnResponse,
"assertion_consumer_service",
binding, **kwargs)
except StatusError as err:
logger.error("SAML status error: %s" % err)
raise
except UnravelError:
return None
except Exception as err:
logger.error("XML parse error: %s" % err)
raise
#logger.debug(">> %s", resp)
if resp is None:
return None
elif isinstance(resp, AuthnResponse):
self.users.add_information_about_person(resp.session_info())
logger.info("--- ADDED person info ----")
pass
else:
logger.error("Response type not supported: %s" % (
saml2.class_name(resp),))
return resp
# ------------------------------------------------------------------------
# SubjectQuery, AuthnQuery, RequestedAuthnContext, AttributeQuery,
# AuthzDecisionQuery all get Response as response
def parse_authz_decision_query_response(self, response,
binding=BINDING_SOAP):
""" Verify that the response is OK
"""
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
return self._parse_response(response, AuthzResponse, "", binding,
**kwargs)
def parse_authn_query_response(self, response, binding=BINDING_SOAP):
""" Verify that the response is OK
"""
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
return self._parse_response(response, AuthnQueryResponse, "", binding,
**kwargs)
def parse_assertion_id_request_response(self, response, binding):
""" Verify that the response is OK
"""
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
res = self._parse_response(response, AssertionIDResponse, "", binding,
**kwargs)
return res
# ------------------------------------------------------------------------
def parse_attribute_query_response(self, response, binding):
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
return self._parse_response(response, AttributeResponse,
"attribute_consuming_service", binding,
**kwargs)
def parse_name_id_mapping_request_response(self, txt, binding=BINDING_SOAP):
"""
:param txt: SOAP enveloped SAML message
:param binding: Just a placeholder, it's always BINDING_SOAP
:return: parsed and verified <NameIDMappingResponse> instance
"""
return self._parse_response(txt, NameIDMappingResponse, "", binding)
# ------------------- ECP ------------------------------------------------
def create_ecp_authn_request(self, entityid=None, relay_state="",
sign=False, **kwargs):
""" Makes an authentication request.
:param entityid: The entity ID of the IdP to send the request to
:param relay_state: A token that can be used by the SP to know
where to continue the conversation with the client
:param sign: Whether the request should be signed or not.
:return: SOAP message with the AuthnRequest
"""
# ----------------------------------------
# <paos:Request>
# ----------------------------------------
my_url = self.service_urls(BINDING_PAOS)[0]
# must_understand and act according to the standard
#
paos_request = paos.Request(must_understand="1", actor=ACTOR,
response_consumer_url=my_url,
service=ECP_SERVICE)
# ----------------------------------------
# <ecp:RelayState>
# ----------------------------------------
relay_state = ecp.RelayState(actor=ACTOR, must_understand="1",
text=relay_state)
# ----------------------------------------
# <samlp:AuthnRequest>
# ----------------------------------------
try:
authn_req = kwargs["authn_req"]
try:
req_id = authn_req.id
except AttributeError:
req_id = 0 # Unknown but since it's SOAP it doesn't matter
except KeyError:
try:
_binding = kwargs["binding"]
except KeyError:
_binding = BINDING_SOAP
kwargs["binding"] = _binding
logger.debug("entityid: %s, binding: %s" % (entityid, _binding))
# The IDP publishes support for ECP by using the SOAP binding on
# SingleSignOnService
_, location = self.pick_binding("single_sign_on_service",
[_binding], entity_id=entityid)
req_id, authn_req = self.create_authn_request(
location, service_url_binding=BINDING_PAOS, **kwargs)
# ----------------------------------------
# The SOAP envelope
# ----------------------------------------
soap_envelope = make_soap_enveloped_saml_thingy(authn_req,
[paos_request,
relay_state])
return req_id, "%s" % soap_envelope
def parse_ecp_authn_response(self, txt, outstanding=None):
rdict = soap.class_instances_from_soap_enveloped_saml_thingies(txt,
[paos,
ecp,
samlp])
_relay_state = None
for item in rdict["header"]:
if item.c_tag == "RelayState" and\
item.c_namespace == ecp.NAMESPACE:
_relay_state = item
response = self.parse_authn_request_response(rdict["body"],
BINDING_PAOS, outstanding)
return response, _relay_state
@staticmethod
def can_handle_ecp_response(response):
try:
accept = response.headers["accept"]
except KeyError:
try:
accept = response.headers["Accept"]
except KeyError:
return False
if MIME_PAOS in accept:
return True
else:
return False
# ----------------------------------------------------------------------
# IDP discovery
# ----------------------------------------------------------------------
@staticmethod
def create_discovery_service_request(url, entity_id, **kwargs):
"""
Created the HTTP redirect URL needed to send the user to the
discovery service.
:param url: The URL of the discovery service
:param entity_id: The unique identifier of the service provider
:param return: The discovery service MUST redirect the user agent
to this location in response to this request
:param policy: A parameter name used to indicate the desired behavior
controlling the processing of the discovery service
:param returnIDParam: A parameter name used to return the unique
identifier of the selected identity provider to the original
requester.
:param isPassive: A boolean value True/False that controls
whether the discovery service is allowed to visibly interact with
the user agent.
:return: A URL
"""
args = {"entityID": entity_id}
for key in ["policy", "returnIDParam"]:
try:
args[key] = kwargs[key]
except KeyError:
pass
try:
args["return"] = kwargs["return_url"]
except KeyError:
try:
args["return"] = kwargs["return"]
except KeyError:
pass
if "isPassive" in kwargs:
if kwargs["isPassive"]:
args["isPassive"] = "true"
else:
args["isPassive"] = "false"
params = urlencode(args)
return "%s?%s" % (url, params)
@staticmethod
def parse_discovery_service_response(url="", query="",
returnIDParam="entityID"):
"""
Deal with the response url from a Discovery Service
:param url: the url the user was redirected back to or
:param query: just the query part of the URL.
:param returnIDParam: This is where the identifier of the IdP is
place if it was specified in the query. Default is 'entityID'
:return: The IdP identifier or "" if none was given
"""
if url:
part = urlparse(url)
qsd = parse_qs(part[4])
elif query:
qsd = parse_qs(query)
else:
qsd = {}
try:
return qsd[returnIDParam][0]
except KeyError:
return ""
| apache-2.0 |
hdinsight/hue | desktop/core/ext-py/boto-2.38.0/boto/dynamodb/item.py | 153 | 8280 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.dynamodb.exceptions import DynamoDBItemError
class Item(dict):
"""
An item in Amazon DynamoDB.
:ivar hash_key: The HashKey of this item.
:ivar range_key: The RangeKey of this item or None if no RangeKey
is defined.
:ivar hash_key_name: The name of the HashKey associated with this item.
:ivar range_key_name: The name of the RangeKey associated with this item.
:ivar table: The Table this item belongs to.
"""
def __init__(self, table, hash_key=None, range_key=None, attrs=None):
self.table = table
self._updates = None
self._hash_key_name = self.table.schema.hash_key_name
self._range_key_name = self.table.schema.range_key_name
if attrs is None:
attrs = {}
if hash_key is None:
hash_key = attrs.get(self._hash_key_name, None)
self[self._hash_key_name] = hash_key
if self._range_key_name:
if range_key is None:
range_key = attrs.get(self._range_key_name, None)
self[self._range_key_name] = range_key
self._updates = {}
for key, value in attrs.items():
if key != self._hash_key_name and key != self._range_key_name:
self[key] = value
self.consumed_units = 0
@property
def hash_key(self):
return self[self._hash_key_name]
@property
def range_key(self):
return self.get(self._range_key_name)
@property
def hash_key_name(self):
return self._hash_key_name
@property
def range_key_name(self):
return self._range_key_name
def add_attribute(self, attr_name, attr_value):
"""
Queue the addition of an attribute to an item in DynamoDB.
This will eventually result in an UpdateItem request being issued
with an update action of ADD when the save method is called.
:type attr_name: str
:param attr_name: Name of the attribute you want to alter.
:type attr_value: int|long|float|set
:param attr_value: Value which is to be added to the attribute.
"""
self._updates[attr_name] = ("ADD", attr_value)
def delete_attribute(self, attr_name, attr_value=None):
"""
Queue the deletion of an attribute from an item in DynamoDB.
This call will result in a UpdateItem request being issued
with update action of DELETE when the save method is called.
:type attr_name: str
:param attr_name: Name of the attribute you want to alter.
:type attr_value: set
:param attr_value: A set of values to be removed from the attribute.
This parameter is optional. If None, the whole attribute is
removed from the item.
"""
self._updates[attr_name] = ("DELETE", attr_value)
def put_attribute(self, attr_name, attr_value):
"""
Queue the putting of an attribute to an item in DynamoDB.
This call will result in an UpdateItem request being issued
with the update action of PUT when the save method is called.
:type attr_name: str
:param attr_name: Name of the attribute you want to alter.
:type attr_value: int|long|float|str|set
:param attr_value: New value of the attribute.
"""
self._updates[attr_name] = ("PUT", attr_value)
def save(self, expected_value=None, return_values=None):
"""
Commits pending updates to Amazon DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that
you expect. This dictionary should have name/value pairs
where the name is the name of the attribute and the value is
either the value you are expecting or False if you expect
the attribute not to exist.
:type return_values: str
:param return_values: Controls the return of attribute name/value pairs
before they were updated. Possible values are: None, 'ALL_OLD',
'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
specified and the item is overwritten, the content of the old item
is returned. If 'ALL_NEW' is specified, then all the attributes of
the new version of the item are returned. If 'UPDATED_NEW' is
specified, the new versions of only the updated attributes are
returned.
"""
return self.table.layer2.update_item(self, expected_value,
return_values)
def delete(self, expected_value=None, return_values=None):
"""
Delete the item from DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that
you expect. This dictionary should have name/value pairs
where the name is the name of the attribute and the value
is either the value you are expecting or False if you expect
the attribute not to exist.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before then were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
return self.table.layer2.delete_item(self, expected_value,
return_values)
def put(self, expected_value=None, return_values=None):
"""
Store a new item or completely replace an existing item
in Amazon DynamoDB.
:type expected_value: dict
:param expected_value: A dictionary of name/value pairs that
you expect. This dictionary should have name/value pairs
where the name is the name of the attribute and the value
is either the value you are expecting or False if you expect
the attribute not to exist.
:type return_values: str
:param return_values: Controls the return of attribute
name-value pairs before then were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
return self.table.layer2.put_item(self, expected_value, return_values)
def __setitem__(self, key, value):
"""Overrwrite the setter to instead update the _updates
method so this can act like a normal dict"""
if self._updates is not None:
self.put_attribute(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove this key from the items"""
if self._updates is not None:
self.delete_attribute(key)
dict.__delitem__(self, key)
# Allow this item to still be pickled
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
| apache-2.0 |
popazerty/obh-test1 | lib/python/Components/Network.py | 26 | 23663 | import os
import re
from socket import *
from Components.Console import Console
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
class Network:
def __init__(self):
self.ifaces = {}
self.configuredNetworkAdapters = []
self.NetworkState = 0
self.DnsState = 0
self.nameservers = []
self.ethtool_bin = "ethtool"
self.Console = Console()
self.LinkConsole = Console()
self.restartConsole = Console()
self.deactivateInterfaceConsole = Console()
self.activateInterfaceConsole = Console()
self.resetNetworkConsole = Console()
self.DnsConsole = Console()
self.PingConsole = Console()
self.config_ready = None
self.friendlyNames = {}
self.lan_interfaces = []
self.wlan_interfaces = []
self.remoteRootFS = None
self.getInterfaces()
def onRemoteRootFS(self):
if self.remoteRootFS is None:
import Harddisk
for parts in Harddisk.getProcMounts():
if parts[1] == '/' and parts[2] == 'nfs':
self.remoteRootFS = True
break
else:
self.remoteRootFS = False
return self.remoteRootFS
def isBlacklisted(self, iface):
return iface in ('lo', 'wifi0', 'wmaster0', 'sit0', 'tun0')
def getInterfaces(self, callback = None):
self.configuredInterfaces = []
for device in self.getInstalledAdapters():
self.getAddrInet(device, callback)
# helper function
def regExpMatch(self, pattern, string):
if string is None:
return None
try:
return pattern.search(string).group()
except AttributeError:
return None
# helper function to convert ips from a sring to a list of ints
def convertIP(self, ip):
return [ int(n) for n in ip.split('.') ]
def getAddrInet(self, iface, callback):
if not self.Console:
self.Console = Console()
cmd = "ip -o addr show dev " + iface
self.Console.ePopen(cmd, self.IPaddrFinished, [iface,callback])
def IPaddrFinished(self, result, retval, extra_args):
(iface, callback ) = extra_args
data = { 'up': False, 'dhcp': False, 'preup' : False, 'predown' : False }
globalIPpattern = re.compile("scope global")
ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
netRegexp = '[0-9]{1,2}'
macRegexp = '[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}'
ipLinePattern = re.compile('inet ' + ipRegexp + '/')
ipPattern = re.compile(ipRegexp)
netmaskLinePattern = re.compile('/' + netRegexp)
netmaskPattern = re.compile(netRegexp)
bcastLinePattern = re.compile(' brd ' + ipRegexp)
upPattern = re.compile('UP')
macPattern = re.compile(macRegexp)
macLinePattern = re.compile('link/ether ' + macRegexp)
for line in result.splitlines():
split = line.strip().split(' ',2)
if (split[1][:-1] == iface):
up = self.regExpMatch(upPattern, split[2])
mac = self.regExpMatch(macPattern, self.regExpMatch(macLinePattern, split[2]))
if up is not None:
data['up'] = True
if iface is not 'lo':
self.configuredInterfaces.append(iface)
if mac is not None:
data['mac'] = mac
if (split[1] == iface):
if re.search(globalIPpattern, split[2]):
ip = self.regExpMatch(ipPattern, self.regExpMatch(ipLinePattern, split[2]))
netmask = self.calc_netmask(self.regExpMatch(netmaskPattern, self.regExpMatch(netmaskLinePattern, split[2])))
bcast = self.regExpMatch(ipPattern, self.regExpMatch(bcastLinePattern, split[2]))
if ip is not None:
data['ip'] = self.convertIP(ip)
if netmask is not None:
data['netmask'] = self.convertIP(netmask)
if bcast is not None:
data['bcast'] = self.convertIP(bcast)
if not data.has_key('ip'):
data['dhcp'] = True
data['ip'] = [0, 0, 0, 0]
data['netmask'] = [0, 0, 0, 0]
data['gateway'] = [0, 0, 0, 0]
cmd = "route -n | grep " + iface
self.Console.ePopen(cmd,self.routeFinished, [iface, data, callback])
def routeFinished(self, result, retval, extra_args):
(iface, data, callback) = extra_args
ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
ipPattern = re.compile(ipRegexp)
ipLinePattern = re.compile(ipRegexp)
for line in result.splitlines():
print line[0:7]
if line[0:7] == "0.0.0.0":
gateway = self.regExpMatch(ipPattern, line[16:31])
if gateway:
data['gateway'] = self.convertIP(gateway)
self.ifaces[iface] = data
self.loadNetworkConfig(iface,callback)
def writeNetworkConfig(self):
self.configuredInterfaces = []
fp = file('/etc/network/interfaces', 'w')
fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
for ifacename, iface in self.ifaces.items():
if iface['up'] == True:
fp.write("auto " + ifacename + "\n")
self.configuredInterfaces.append(ifacename)
if iface['dhcp'] == True:
fp.write("iface "+ ifacename +" inet dhcp\n")
if iface['dhcp'] == False:
fp.write("iface "+ ifacename +" inet static\n")
if iface.has_key('ip'):
print tuple(iface['ip'])
fp.write(" address %d.%d.%d.%d\n" % tuple(iface['ip']))
fp.write(" netmask %d.%d.%d.%d\n" % tuple(iface['netmask']))
if iface.has_key('gateway'):
fp.write(" gateway %d.%d.%d.%d\n" % tuple(iface['gateway']))
if iface.has_key("configStrings"):
fp.write(iface["configStrings"])
if iface["preup"] is not False and not iface.has_key("configStrings"):
fp.write(iface["preup"])
if iface["predown"] is not False and not iface.has_key("configStrings"):
fp.write(iface["predown"])
fp.write("\n")
fp.close()
self.configuredNetworkAdapters = self.configuredInterfaces
self.writeNameserverConfig()
def writeNameserverConfig(self):
fp = file('/etc/resolv.conf', 'w')
for nameserver in self.nameservers:
fp.write("nameserver %d.%d.%d.%d\n" % tuple(nameserver))
fp.close()
	def loadNetworkConfig(self,iface,callback = None):
		"""Parse /etc/network/interfaces and merge its settings into self.ifaces.

		Static address/netmask/gateway values from the file override what was
		probed from the kernel (only while the interface is not on DHCP); once
		all pending Console commands finished, publishes the configured
		adapter list, loads the nameservers and invokes *callback*.
		"""
		interfaces = []
		# parse the interfaces-file
		try:
			fp = file('/etc/network/interfaces', 'r')
			interfaces = fp.readlines()
			fp.close()
		except:
			print "[Network.py] interfaces - opening failed"
		ifaces = {}
		currif = ""
		for i in interfaces:
			split = i.strip().split(' ')
			if (split[0] == "iface"):
				currif = split[1]
				ifaces[currif] = {}
				# "iface <name> inet dhcp" has exactly 4 tokens
				if (len(split) == 4 and split[3] == "dhcp"):
					ifaces[currif]["dhcp"] = True
				else:
					ifaces[currif]["dhcp"] = False
			if (currif == iface): #read information only for available interfaces
				if (split[0] == "address"):
					ifaces[currif]["address"] = map(int, split[1].split('.'))
					if self.ifaces[currif].has_key("ip"):
						if self.ifaces[currif]["ip"] != ifaces[currif]["address"] and ifaces[currif]["dhcp"] == False:
							self.ifaces[currif]["ip"] = map(int, split[1].split('.'))
				if (split[0] == "netmask"):
					ifaces[currif]["netmask"] = map(int, split[1].split('.'))
					if self.ifaces[currif].has_key("netmask"):
						if self.ifaces[currif]["netmask"] != ifaces[currif]["netmask"] and ifaces[currif]["dhcp"] == False:
							self.ifaces[currif]["netmask"] = map(int, split[1].split('.'))
				if (split[0] == "gateway"):
					ifaces[currif]["gateway"] = map(int, split[1].split('.'))
					if self.ifaces[currif].has_key("gateway"):
						if self.ifaces[currif]["gateway"] != ifaces[currif]["gateway"] and ifaces[currif]["dhcp"] == False:
							self.ifaces[currif]["gateway"] = map(int, split[1].split('.'))
				# pre-up/pre-down lines are stored verbatim (whole line 'i'),
				# the 'preup'/'predown' keys were initialised in IPaddrFinished
				if (split[0] == "pre-up"):
					if self.ifaces[currif].has_key("preup"):
						self.ifaces[currif]["preup"] = i
				if (split[0] in ("pre-down","post-down")):
					if self.ifaces[currif].has_key("predown"):
						self.ifaces[currif]["predown"] = i
		for ifacename, iface in ifaces.items():
			if self.ifaces.has_key(ifacename):
				self.ifaces[ifacename]["dhcp"] = iface["dhcp"]
		if self.Console:
			if len(self.Console.appContainers) == 0:
				# save configured interfacelist
				self.configuredNetworkAdapters = self.configuredInterfaces
				# load ns only once
				self.loadNameserverConfig()
				print "read configured interface:", ifaces
				print "self.ifaces after loading:", self.ifaces
				self.config_ready = True
				self.msgPlugins()
				if callback is not None:
					callback(True)
	def loadNameserverConfig(self):
		"""Read /etc/resolv.conf and fill self.nameservers with IP quads.

		The list is only reset when the file could actually be opened, so a
		missing resolv.conf leaves the previous nameservers in place.
		"""
		ipRegexp = "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
		nameserverPattern = re.compile("nameserver +" + ipRegexp)
		ipPattern = re.compile(ipRegexp)
		resolv = []
		try:
			fp = file('/etc/resolv.conf', 'r')
			resolv = fp.readlines()
			fp.close()
			self.nameservers = []
		except:
			print "[Network.py] resolv.conf - opening failed"
		for line in resolv:
			if self.regExpMatch(nameserverPattern, line) is not None:
				ip = self.regExpMatch(ipPattern, line)
				if ip:
					self.nameservers.append(self.convertIP(ip))
		print "nameservers:", self.nameservers
def getInstalledAdapters(self):
return [x for x in os.listdir('/sys/class/net') if not self.isBlacklisted(x)]
	def getConfiguredAdapters(self):
		# Interfaces written as "auto" in /etc/network/interfaces
		# (filled by loadNetworkConfig/writeNetworkConfig).
		return self.configuredNetworkAdapters
	def getNumberOfAdapters(self):
		# Count of all known interfaces, configured or not.
		return len(self.ifaces)
def getFriendlyAdapterName(self, x):
if x in self.friendlyNames.keys():
return self.friendlyNames.get(x, x)
self.friendlyNames[x] = self.getFriendlyAdapterNaming(x)
return self.friendlyNames.get(x, x) # when we have no friendly name, use adapter name
def getFriendlyAdapterNaming(self, iface):
name = None
if self.isWirelessInterface(iface):
if iface not in self.wlan_interfaces:
name = _("WLAN connection")
if len(self.wlan_interfaces):
name += " " + str(len(self.wlan_interfaces)+1)
self.wlan_interfaces.append(iface)
else:
if iface not in self.lan_interfaces:
name = _("LAN connection")
if len(self.lan_interfaces):
name += " " + str(len(self.lan_interfaces)+1)
self.lan_interfaces.append(iface)
return name
def getFriendlyAdapterDescription(self, iface):
if not self.isWirelessInterface(iface):
return _('Ethernet network interface')
moduledir = self.getWlanModuleDir(iface)
if moduledir:
name = os.path.basename(os.path.realpath(moduledir))
if name in ('ath_pci','ath5k'):
name = 'Atheros'
elif name in ('rt73','rt73usb','rt3070sta'):
name = 'Ralink'
elif name == 'zd1211b':
name = 'Zydas'
elif name == 'r871x_usb_drv':
name = 'Realtek'
else:
name = _('Unknown')
return name + ' ' + _('wireless network interface')
	def getAdapterName(self, iface):
		# The raw interface name is its adapter name.
		return iface
	def getAdapterList(self):
		# All known interface names (Python 2: keys() returns a list).
		return self.ifaces.keys()
def getAdapterAttribute(self, iface, attribute):
if self.ifaces.has_key(iface):
if self.ifaces[iface].has_key(attribute):
return self.ifaces[iface][attribute]
return None
def setAdapterAttribute(self, iface, attribute, value):
print "setting for adapter", iface, "attribute", attribute, " to value", value
if self.ifaces.has_key(iface):
self.ifaces[iface][attribute] = value
def removeAdapterAttribute(self, iface, attribute):
if self.ifaces.has_key(iface):
if self.ifaces[iface].has_key(attribute):
del self.ifaces[iface][attribute]
def getNameserverList(self):
if len(self.nameservers) == 0:
return [[0, 0, 0, 0], [0, 0, 0, 0]]
else:
return self.nameservers
def clearNameservers(self):
self.nameservers = []
def addNameserver(self, nameserver):
if nameserver not in self.nameservers:
self.nameservers.append(nameserver)
def removeNameserver(self, nameserver):
if nameserver in self.nameservers:
self.nameservers.remove(nameserver)
def changeNameserver(self, oldnameserver, newnameserver):
if oldnameserver in self.nameservers:
for i in range(len(self.nameservers)):
if self.nameservers[i] == oldnameserver:
self.nameservers[i] = newnameserver
	def resetNetworkConfig(self, mode='lan', callback = None):
		"""Flush all interfaces (eth0 is skipped while the rootfs is
		network-mounted), stop networking/avahi and kill any DHCP client,
		then write a fresh default config for *mode* via the batch callback."""
		self.resetNetworkConsole = Console()
		self.commands = []
		self.commands.append("/etc/init.d/avahi-daemon stop")
		for iface in self.ifaces.keys():
			if iface != 'eth0' or not self.onRemoteRootFS():
				self.commands.append("ip addr flush dev " + iface)
		self.commands.append("/etc/init.d/networking stop")
		self.commands.append("killall -9 udhcpc")
		self.commands.append("rm /var/run/udhcpc*")
		self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinishedCB, [mode, callback], debug=True)
	def resetNetworkFinishedCB(self, extra_args):
		# Runs after each batch command; proceed once every command finished.
		(mode, callback) = extra_args
		if len(self.resetNetworkConsole.appContainers) == 0:
			self.writeDefaultNetworkConfig(mode, callback)
def writeDefaultNetworkConfig(self,mode='lan', callback = None):
fp = file('/etc/network/interfaces', 'w')
fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
if mode == 'wlan':
fp.write("auto wlan0\n")
fp.write("iface wlan0 inet dhcp\n")
if mode == 'wlan-mpci':
fp.write("auto ath0\n")
fp.write("iface ath0 inet dhcp\n")
if mode == 'lan':
fp.write("auto eth0\n")
fp.write("iface eth0 inet dhcp\n")
fp.write("\n")
fp.close()
self.resetNetworkConsole = Console()
self.commands = []
if mode == 'wlan':
self.commands.append("ifconfig eth0 down")
self.commands.append("ifconfig ath0 down")
self.commands.append("ifconfig wlan0 up")
if mode == 'wlan-mpci':
self.commands.append("ifconfig eth0 down")
self.commands.append("ifconfig wlan0 down")
self.commands.append("ifconfig ath0 up")
if mode == 'lan':
self.commands.append("ifconfig eth0 up")
self.commands.append("ifconfig wlan0 down")
self.commands.append("ifconfig ath0 down")
self.commands.append("/etc/init.d/avahi-daemon start")
self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinished, [mode,callback], debug=True)
	def resetNetworkFinished(self,extra_args):
		# Batch callback for writeDefaultNetworkConfig: report success once done.
		(mode, callback) = extra_args
		if len(self.resetNetworkConsole.appContainers) == 0:
			if callback is not None:
				callback(True,mode)
	def checkNetworkState(self,statecallback):
		"""Ping three well-known hosts in parallel; NetworkState counts the
		failures and is reported via *statecallback* (0 means reachable)."""
		self.NetworkState = 0
		cmd1 = "ping -c 1 www.openpli.org"
		cmd2 = "ping -c 1 www.google.nl"
		cmd3 = "ping -c 1 www.google.com"
		self.PingConsole = Console()
		self.PingConsole.ePopen(cmd1, self.checkNetworkStateFinished,statecallback)
		self.PingConsole.ePopen(cmd2, self.checkNetworkStateFinished,statecallback)
		self.PingConsole.ePopen(cmd3, self.checkNetworkStateFinished,statecallback)
	def checkNetworkStateFinished(self, result, retval,extra_args):
		# First successful ping (retval 0) short-circuits; otherwise report the
		# failure count once all three pings returned.
		(statecallback) = extra_args
		if self.PingConsole is not None:
			if retval == 0:
				self.PingConsole = None
				statecallback(self.NetworkState)
			else:
				self.NetworkState += 1
				if len(self.PingConsole.appContainers) == 0:
					statecallback(self.NetworkState)
	def restartNetwork(self,callback = None):
		"""Take all interfaces down (eth0 skipped on a network-mounted rootfs),
		stop and restart networking plus avahi, then invoke *callback*."""
		self.restartConsole = Console()
		self.config_ready = False
		self.msgPlugins()
		self.commands = []
		self.commands.append("/etc/init.d/avahi-daemon stop")
		for iface in self.ifaces.keys():
			if iface != 'eth0' or not self.onRemoteRootFS():
				self.commands.append("ifdown " + iface)
				self.commands.append("ip addr flush dev " + iface)
		self.commands.append("/etc/init.d/networking stop")
		self.commands.append("killall -9 udhcpc")
		self.commands.append("rm /var/run/udhcpc*")
		self.commands.append("/etc/init.d/networking start")
		self.commands.append("/etc/init.d/avahi-daemon start")
		self.restartConsole.eBatch(self.commands, self.restartNetworkFinished, callback, debug=True)
	def restartNetworkFinished(self,extra_args):
		# Batch callback: signal completion to the caller.
		( callback ) = extra_args
		if callback is not None:
			callback(True)
	def getLinkState(self,iface,callback):
		# Query link status via ethtool; raw output goes to the callback.
		cmd = self.ethtool_bin + " " + iface
		self.LinkConsole = Console()
		self.LinkConsole.ePopen(cmd, self.getLinkStateFinished,callback)
	def getLinkStateFinished(self, result, retval,extra_args):
		# Forward the ethtool output once the last command finished.
		(callback) = extra_args
		if self.LinkConsole is not None:
			if len(self.LinkConsole.appContainers) == 0:
				callback(result)
	# The following stop* helpers abort any still-running commands on the
	# corresponding Console instance.
	def stopPingConsole(self):
		if self.PingConsole is not None:
			if len(self.PingConsole.appContainers):
				for name in self.PingConsole.appContainers.keys():
					self.PingConsole.kill(name)
	def stopLinkStateConsole(self):
		if self.LinkConsole is not None:
			if len(self.LinkConsole.appContainers):
				for name in self.LinkConsole.appContainers.keys():
					self.LinkConsole.kill(name)
	def stopDNSConsole(self):
		if self.DnsConsole is not None:
			if len(self.DnsConsole.appContainers):
				for name in self.DnsConsole.appContainers.keys():
					self.DnsConsole.kill(name)
	def stopRestartConsole(self):
		if self.restartConsole is not None:
			if len(self.restartConsole.appContainers):
				for name in self.restartConsole.appContainers.keys():
					self.restartConsole.kill(name)
	def stopGetInterfacesConsole(self):
		if self.Console is not None:
			if len(self.Console.appContainers):
				for name in self.Console.appContainers.keys():
					self.Console.kill(name)
	def stopDeactivateInterfaceConsole(self):
		# Uses killAll() and drops the console, unlike the per-name kills above.
		if self.deactivateInterfaceConsole is not None:
			self.deactivateInterfaceConsole.killAll()
			self.deactivateInterfaceConsole = None
	def stopActivateInterfaceConsole(self):
		if self.activateInterfaceConsole is not None:
			self.activateInterfaceConsole.killAll()
			self.activateInterfaceConsole = None
	def checkforInterface(self,iface):
		"""Return True when *iface* exists/is usable.

		NOTE(review): when the interface is not already up this probes it by
		running 'ifconfig <iface> up' followed by 'down' - a visible side
		effect on the interface state.
		"""
		if self.getAdapterAttribute(iface, 'up') is True:
			return True
		else:
			ret=os.system("ifconfig " + iface + " up")
			os.system("ifconfig " + iface + " down")
			if ret == 0:
				return True
			else:
				return False
	def checkDNSLookup(self,statecallback):
		"""Resolve three well-known hostnames in parallel; DnsState counts the
		failures and is reported via *statecallback* (0 means DNS works)."""
		cmd1 = "nslookup www.dream-multimedia-tv.de"
		cmd2 = "nslookup www.heise.de"
		cmd3 = "nslookup www.google.de"
		self.DnsConsole = Console()
		self.DnsConsole.ePopen(cmd1, self.checkDNSLookupFinished,statecallback)
		self.DnsConsole.ePopen(cmd2, self.checkDNSLookupFinished,statecallback)
		self.DnsConsole.ePopen(cmd3, self.checkDNSLookupFinished,statecallback)
	def checkDNSLookupFinished(self, result, retval,extra_args):
		# First successful lookup (retval 0) short-circuits; otherwise report
		# the failure count once all three lookups returned.
		(statecallback) = extra_args
		if self.DnsConsole is not None:
			if retval == 0:
				self.DnsConsole = None
				statecallback(self.DnsState)
			else:
				self.DnsState += 1
				if len(self.DnsConsole.appContainers) == 0:
					statecallback(self.DnsState)
	def deactivateInterface(self,ifaces,callback = None):
		"""Bring one interface (string) or several (list/tuple) down.

		eth0 is left untouched when the rootfs is network-mounted; in the
		single-interface case that short-circuits straight to *callback*.
		"""
		self.config_ready = False
		self.msgPlugins()
		commands = []
		def buildCommands(iface):
			commands.append("ifdown " + iface)
			commands.append("ip addr flush dev " + iface)
			#wpa_supplicant sometimes doesn't quit properly on SIGTERM
			if os.path.exists('/var/run/wpa_supplicant/'+ iface):
				commands.append("wpa_cli -i" + iface + " terminate")
		if not self.deactivateInterfaceConsole:
			self.deactivateInterfaceConsole = Console()
		if isinstance(ifaces, (list, tuple)):
			for iface in ifaces:
				if iface != 'eth0' or not self.onRemoteRootFS():
					buildCommands(iface)
		else:
			if ifaces == 'eth0' and self.onRemoteRootFS():
				if callback is not None:
					callback(True)
				return
			buildCommands(ifaces)
		self.deactivateInterfaceConsole.eBatch(commands, self.deactivateInterfaceFinished, [ifaces,callback], debug=True)
	def deactivateInterfaceFinished(self,extra_args):
		"""Batch callback for deactivateInterface: verify each 'ifdown' took
		effect and force 'ifconfig down' where it did not."""
		(ifaces, callback) = extra_args
		def checkCommandResult(iface):
			# False means ifdown reported the interface as "not configured".
			if self.deactivateInterfaceConsole and self.deactivateInterfaceConsole.appResults.has_key("ifdown " + iface):
				result = str(self.deactivateInterfaceConsole.appResults.get("ifdown " + iface)).strip("\n")
				if result == "ifdown: interface " + iface + " not configured":
					return False
				else:
					return True
		#ifdown sometimes can't get the interface down.
		if isinstance(ifaces, (list, tuple)):
			for iface in ifaces:
				if checkCommandResult(iface) is False:
					Console().ePopen(("ifconfig " + iface + " down" ))
		else:
			if checkCommandResult(ifaces) is False:
				Console().ePopen(("ifconfig " + ifaces + " down" ))
		if self.deactivateInterfaceConsole:
			if len(self.deactivateInterfaceConsole.appContainers) == 0:
				if callback is not None:
					callback(True)
	def activateInterface(self,iface,callback = None):
		"""Bring *iface* up via 'ifup'; eth0 is skipped when the rootfs is
		network-mounted (callback fires immediately in that case)."""
		if self.config_ready:
			self.config_ready = False
			self.msgPlugins()
		if iface == 'eth0' and self.onRemoteRootFS():
			if callback is not None:
				callback(True)
			return
		if not self.activateInterfaceConsole:
			self.activateInterfaceConsole = Console()
		commands = []
		commands.append("ifup " + iface)
		self.activateInterfaceConsole.eBatch(commands, self.activateInterfaceFinished, callback, debug=True)
	def activateInterfaceFinished(self,extra_args):
		# Batch callback: signal completion once all commands finished.
		callback = extra_args
		if self.activateInterfaceConsole:
			if len(self.activateInterfaceConsole.appContainers) == 0:
				if callback is not None:
					callback(True)
def sysfsPath(self, iface):
return '/sys/class/net/' + iface
def isWirelessInterface(self, iface):
if iface in self.wlan_interfaces:
return True
if os.path.isdir(self.sysfsPath(iface) + '/wireless'):
return True
# r871x_usb_drv on kernel 2.6.12 is not identifiable over /sys/class/net/'ifacename'/wireless so look also inside /proc/net/wireless
device = re.compile('[a-z]{2,}[0-9]*:')
ifnames = []
fp = open('/proc/net/wireless', 'r')
for line in fp:
try:
ifnames.append(device.search(line).group()[:-1])
except AttributeError:
pass
if iface in ifnames:
return True
return False
	def getWlanModuleDir(self, iface = None):
		"""Locate the sysfs directory of the kernel driver module behind
		*iface*, trying several layouts used by different kernels/drivers.
		Returns None when none of them exists."""
		devicedir = self.sysfsPath(iface) + '/device'
		moduledir = devicedir + '/driver/module'
		if os.path.isdir(moduledir):
			return moduledir
		# identification is not possible over default moduledir
		for x in os.listdir(devicedir):
			# rt3070 on kernel 2.6.18 registers wireless devices as usb_device (e.g. 1-1.3:1.0) and identification is only possible over /sys/class/net/'ifacename'/device/1-xxx
			if x.startswith("1-"):
				moduledir = devicedir + '/' + x + '/driver/module'
				if os.path.isdir(moduledir):
					return moduledir
		# rt73, zd1211b, r871x_usb_drv on kernel 2.6.12 can be identified over /sys/class/net/'ifacename'/device/driver, so look also here
		moduledir = devicedir + '/driver'
		if os.path.isdir(moduledir):
			return moduledir
		return None
def detectWlanModule(self, iface = None):
if not self.isWirelessInterface(iface):
return None
devicedir = self.sysfsPath(iface) + '/device'
if os.path.isdir(devicedir + '/ieee80211'):
return 'nl80211'
moduledir = self.getWlanModuleDir(iface)
if moduledir:
module = os.path.basename(os.path.realpath(moduledir))
if module in ('ath_pci','ath5k'):
return 'madwifi'
if module in ('rt73','rt73'):
return 'ralink'
if module == 'zd1211b':
return 'zydas'
return 'wext'
def calc_netmask(self,nmask):
from struct import pack, unpack
from socket import inet_ntoa, inet_aton
mask = 1L<<31
xnet = (1L<<32)-1
cidr_range = range(0, 32)
cidr = long(nmask)
if cidr not in cidr_range:
print 'cidr invalid: %d' % cidr
return None
else:
nm = ((1L<<cidr)-1)<<(32-cidr)
netmask = str(inet_ntoa(pack('>L', nm)))
return netmask
	def msgPlugins(self):
		# Notify plugins registered for network-config-read events about the
		# current config_ready state.
		if self.config_ready is not None:
			for p in plugins.getPlugins(PluginDescriptor.WHERE_NETWORKCONFIG_READ):
				p(reason=self.config_ready)
	def hotplug(self, event):
		"""Handle a hotplug *event* dict (keys INTERFACE/ACTION): probe newly
		added interfaces, forget removed ones; blacklisted names are ignored."""
		interface = event['INTERFACE']
		if self.isBlacklisted(interface):
			return
		action = event['ACTION']
		if action == "add":
			print "[Network] Add new interface:", interface
			self.getAddrInet(interface, None)
		elif action == "remove":
			print "[Network] Removed interface:", interface
			try:
				del self.ifaces[interface]
			except KeyError:
				pass
# Module-level singleton network controller.
iNetwork = Network()
# Kept for compatibility with callers expecting an init hook; no-op.
def InitNetwork():
	pass
| gpl-2.0 |
OmarIthawi/edx-platform | lms/djangoapps/bulk_email/migrations/0008_add_course_authorizations.py | 60 | 6425 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the CourseAuthorization table."""
        # Adding model 'CourseAuthorization'
        db.create_table('bulk_email_courseauthorization', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('email_enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('bulk_email', ['CourseAuthorization'])
    def backwards(self, orm):
        """Revert the migration: drop the CourseAuthorization table."""
        # Deleting model 'CourseAuthorization'
        db.delete_table('bulk_email_courseauthorization')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bulk_email.courseauthorization': {
'Meta': {'object_name': 'CourseAuthorization'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'bulk_email.courseemail': {
'Meta': {'object_name': 'CourseEmail'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
},
'bulk_email.courseemailtemplate': {
'Meta': {'object_name': 'CourseEmailTemplate'},
'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'bulk_email.optout': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bulk_email'] | agpl-3.0 |
blaggacao/OpenUpgrade | addons/portal_project_issue/tests/__init__.py | 260 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
weimingtom/python-for-android | python-modules/zope/zope/interface/common/mapping.py | 50 | 3512 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Mapping Interfaces
$Id: mapping.py 110536 2010-04-06 02:59:44Z tseaver $
"""
from zope.interface import Interface
class IItemMapping(Interface):
    """Simplest readable mapping object
    """
    def __getitem__(key):
        """Get a value for a key
        A KeyError is raised if there is no value for the key.
        """
class IReadMapping(IItemMapping):
    """Basic mapping interface
    """
    def get(key, default=None):
        """Get a value for a key
        The default is returned if there is no value for the key.
        """
    def __contains__(key):
        """Tell if a key exists in the mapping."""
class IWriteMapping(Interface):
    """Mapping methods for changing data"""
    def __delitem__(key):
        """Delete a value from the mapping using the key."""
    def __setitem__(key, value):
        """Set a new item in the mapping."""
class IEnumerableMapping(IReadMapping):
    """Mapping objects whose items can be enumerated.
    """
    def keys():
        """Return the keys of the mapping object.
        """
    def __iter__():
        """Return an iterator for the keys of the mapping object.
        """
    def values():
        """Return the values of the mapping object.
        """
    def items():
        """Return the items of the mapping object.
        """
    def __len__():
        """Return the number of items.
        """
class IMapping(IWriteMapping, IEnumerableMapping):
    ''' Simple mapping interface '''
# Python-2-style lazy iteration methods.
class IIterableMapping(IEnumerableMapping):
    def iterkeys():
        "iterate over keys; equivalent to __iter__"
    def itervalues():
        "iterate over values"
    def iteritems():
        "iterate over items"
class IClonableMapping(Interface):
    def copy():
        "return copy of dict"
class IExtendedReadMapping(IIterableMapping):
    def has_key(key):
        """Tell if a key exists in the mapping; equivalent to __contains__"""
class IExtendedWriteMapping(IWriteMapping):
    def clear():
        "delete all items"
    def update(d):
        " Update D from E: for k in E.keys(): D[k] = E[k]"
    def setdefault(key, default=None):
        "D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D"
    def pop(k, *args):
        """remove specified key and return the corresponding value
        *args may contain a single default value, or may not be supplied.
        If key is not found, default is returned if given, otherwise
        KeyError is raised"""
    def popitem():
        """remove and return some (key, value) pair as a
        2-tuple; but raise KeyError if mapping is empty"""
class IFullMapping(
    IExtendedReadMapping, IExtendedWriteMapping, IClonableMapping, IMapping):
    ''' Full mapping interface ''' # IMapping included so tests for IMapping
    # succeed with IFullMapping
| apache-2.0 |
comiconomenclaturist/libretime | python_apps/airtime_analyzer/tests/replaygain_analyzer_tests.py | 2 | 4078 | from nose.tools import *
from airtime_analyzer.replaygain_analyzer import ReplayGainAnalyzer
'''
The tests in here were all tagged with the 'rgain' tag so the can be exluded from being run
with nosetest -a '!rgain'. This was needed due to the fact that it is not readily possible
to install replaygain on a containerized travis instance.
We can either give running replaygain test on travis another shot after ubuntu getsan updated
gi instrospection allowing us to install gi and gobject into the virtualenv, or we can switch
to a full machine and stop using 'sudo: false' on travis.
Deactivating these tests is a bad fix for now and I plan on looking into it again after
most everything else is up and running. For those interesed the tests seem to work locally
albeit my results not being up to the given tolerance of 0.30 (which I'm assuming is my rig's
problem and would work on travis if replaygain was available).
'''
def check_default_metadata(metadata):
    """Check that the replaygain value extracted by ReplayGainAnalyzer on our
    test audio files matches the expected correction.

    :param metadata: a metadata dictionary
    :return: Nothing
    """
    # We give python-rgain some leeway here by specifying a tolerance.
    # It's not perfectly consistent across codecs...
    tolerance = 0.30
    expected_replaygain = 5.0
    print(metadata['replay_gain'])
    assert abs(metadata['replay_gain'] - expected_replaygain) < tolerance
def test_missing_replaygain():
    # Analyzer must not crash when the replaygain binary is absent.
    old_rg = ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE
    ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = 'foosdaf'
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
    ReplayGainAnalyzer.REPLAYGAIN_EXECUTABLE = old_rg # Need to put this back
def test_invalid_filepath():
    # Analyzer must not crash on a path that does not exist.
    metadata = ReplayGainAnalyzer.analyze(u'non-existent-file', dict())
# Each test below is tagged .rgain so it can be excluded via nosetest -a '!rgain'.
def test_mp3_utf8():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
    check_default_metadata(metadata)
test_mp3_utf8.rgain = True
def test_mp3_dualmono():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
    check_default_metadata(metadata)
test_mp3_dualmono.rgain = True
def test_mp3_jointstereo():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
    check_default_metadata(metadata)
test_mp3_jointstereo.rgain = True
def test_mp3_simplestereo():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
    check_default_metadata(metadata)
test_mp3_simplestereo.rgain = True
def test_mp3_stereo():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict())
    check_default_metadata(metadata)
test_mp3_stereo.rgain = True
def test_mp3_mono():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict())
    check_default_metadata(metadata)
test_mp3_mono.rgain = True
def test_ogg_stereo():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict())
    check_default_metadata(metadata)
# BUGFIX: was "test_ogg_stereo = True", which replaced the test function with
# a bare boolean; set the .rgain tag attribute like every other test here.
test_ogg_stereo.rgain = True
def test_invalid_wma():
    # Analyzer must not crash on a corrupt WMA file.
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
test_invalid_wma.rgain = True
def test_mp3_missing_id3_header():
    # Analyzer must not crash on an MP3 without an ID3 header.
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mp3-missingid3header.mp3', dict())
test_mp3_missing_id3_header.rgain = True
def test_m4a_stereo():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict())
    check_default_metadata(metadata)
test_m4a_stereo.rgain = True
''' WAVE is not supported by python-rgain yet
def test_wav_stereo():
    metadata = ReplayGainAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
    check_default_metadata(metadata)
test_wav_stereo.rgain = True
'''
| agpl-3.0 |
jeremiahyan/odoo | addons/stock/wizard/stock_scheduler_compute.py | 2 | 1938 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
#
# Order Point Method:
# - Order if the virtual stock of today is below the min of the defined order point
#
from odoo import api, models, tools
import logging
import threading
_logger = logging.getLogger(__name__)
class StockSchedulerCompute(models.TransientModel):
    """Wizard letting a user trigger the stock procurement scheduler by hand."""
    _name = 'stock.scheduler.compute'
    _description = 'Run Scheduler Manually'

    def _procure_calculation_orderpoint(self):
        """Run the scheduler once per allowed company.

        Executed inside a worker thread (see :meth:`procure_calculation`),
        hence the dedicated cursor and the row lock used to guarantee a
        single concurrent scheduler run.
        """
        with api.Environment.manage():
            # As this function is in a new thread, I need to open a new cursor, because the old one may be closed
            new_cr = self.pool.cursor()
            self = self.with_env(self.env(cr=new_cr))
            scheduler_cron = self.sudo().env.ref('stock.ir_cron_scheduler_action')
            # Avoid to run the scheduler multiple times in the same time
            try:
                with tools.mute_logger('odoo.sql_db'):
                    # FOR UPDATE NOWAIT raises immediately if another
                    # transaction (a running scheduler) holds the cron row.
                    self._cr.execute("SELECT id FROM ir_cron WHERE id = %s FOR UPDATE NOWAIT", (scheduler_cron.id,))
            except Exception:
                _logger.info('Attempt to run procurement scheduler aborted, as already running')
                self._cr.rollback()
                self._cr.close()
                return {}

            for company in self.env.user.company_ids:
                # Scheduler runs per company but with all companies allowed
                # in context so inter-company routes resolve.
                cids = (self.env.user.company_id | self.env.user.company_ids).ids
                self.env['procurement.group'].with_context(allowed_company_ids=cids).run_scheduler(
                    use_new_cursor=self._cr.dbname,
                    company_id=company.id)
            new_cr.close()
            return {}

    def procure_calculation(self):
        """Start the scheduler in a background thread and reload the client."""
        threaded_calculation = threading.Thread(target=self._procure_calculation_orderpoint, args=())
        threaded_calculation.start()
        return {'type': 'ir.actions.client', 'tag': 'reload'}
| gpl-3.0 |
KyleJamesWalker/ansible-modules-core | cloud/amazon/ec2_vpc_net.py | 35 | 9734 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicate tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
'''
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
    """Return a human-readable message extracted from a boto exception.

    Prefers boto's ``error_message`` attribute, then the generic
    ``message``, and finally falls back to a formatted representation.
    """
    for attr in ('error_message', 'message'):
        if hasattr(err, attr):
            return getattr(err, attr)
    return '%s: %s' % (Exception, err)
def vpc_exists(module, vpc, name, cidr_block, multi):
"""Returns True or False in regards to the existence of a VPC. When supplied
with a CIDR, it will check for matching tags to determine if it is a match
otherwise it will assume the VPC does not exist and thus return false.
"""
matched_vpc = None
try:
matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if len(matching_vpcs) == 1:
matched_vpc = matching_vpcs[0]
elif len(matching_vpcs) > 1:
if multi:
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
    # Returns True when tags were (re)written, False when already in sync.
    # The Name tag is managed through the dedicated 'name' parameter and
    # always overrides any 'Name' key the user supplied in tags.
    if tags is None:
        tags = dict()

    tags.update({'Name': name})
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
        # cmp() (Python 2) is non-zero whenever the desired and current tag
        # dicts differ in any way; only then do we call create_tags.
        if cmp(tags, current_tags):
            vpc.create_tags(vpc_obj.id, tags)
            return True
        else:
            return False
    except Exception, e:
        e_msg=boto_exception(e)
        module.fail_json(msg=e_msg)
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Associate *dhcp_id* with the VPC unless it is already in effect.

    :returns: True when an association call was made (state changed),
              False when the VPC already uses the requested options set.
    """
    if vpc_obj.dhcp_options_id == dhcp_id:
        # Already associated - nothing to do.
        return False
    connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
    return True
def get_vpc_values(vpc_obj):
    """Return a plain dict of the VPC's attributes, suitable for exit_json.

    Strips boto bookkeeping attributes (region/item/connection) that are not
    JSON-serialisable.  Works on a copy of __dict__: the original
    implementation popped keys from the live object's __dict__, mutating the
    caller's boto VPC object as a side effect.

    :param vpc_obj: a boto VPC object, or None
    :returns: dict of attribute values, or None when vpc_obj is None
    """
    if vpc_obj is None:
        return None
    vpc_values = dict(vpc_obj.__dict__)
    for internal_key in ("region", "item", "connection"):
        vpc_values.pop(internal_key, None)
    return vpc_values
def main():
argument_spec=ec2_argument_spec()
argument_spec.update(dict(
name = dict(type='str', default=None, required=True),
cidr_block = dict(type='str', default=None, required=True),
tenancy = dict(choices=['default', 'dedicated'], default='default'),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
dhcp_opts_id = dict(type='str', default=None, required=False),
tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
state = dict(choices=['present', 'absent'], default='present'),
multi_ok = dict(type='bool', default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
name=module.params.get('name')
cidr_block=module.params.get('cidr_block')
tenancy=module.params.get('tenancy')
dns_support=module.params.get('dns_support')
dns_hostnames=module.params.get('dns_hostnames')
dhcp_id=module.params.get('dhcp_opts_id')
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')
changed=False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if dns_hostnames and not dns_support:
module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')
if state == 'present':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is None:
try:
vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
if dhcp_id is not None:
try:
if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
if tags is not None or name is not None:
try:
if update_vpc_tags(connection, module, vpc_obj, tags, name):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
# Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
# which is needed in order to detect the current status of DNS options. For now we just update
# the attribute each time and is not used as a changed-factor.
try:
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
# get the vpc obj again in case it has changed
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
elif state == 'absent':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is not None:
try:
connection.delete_vpc(vpc_obj.id)
vpc_obj = None
changed = True
except BotoServerError, e:
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
public-ink/public-ink | server/appengine/lib/numpy/distutils/__config__.py | 2 | 1398 | # This file is generated by /private/var/folders/w8/mp6r1h_s51dfzfhtclzq7jdjj2krv0/T/pip-build-_1SLS7/numpy/-c
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
# Build-time system_info results captured when this numpy was compiled.
# An empty dict means the corresponding library was not found on the build
# machine; populated dicts record the flags used to link against it.
atlas_3_10_blas_info={}
atlas_3_10_blas_threads_info={}
atlas_threads_info={}
blas_opt_info={'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)], 'extra_compile_args': ['-msse3', '-I/System/Library/Frameworks/vecLib.framework/Headers']}
blis_info={}
atlas_blas_threads_info={}
openblas_info={}
lapack_opt_info={'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)], 'extra_compile_args': ['-msse3']}
openblas_lapack_info={}
atlas_3_10_threads_info={}
atlas_info={}
atlas_3_10_info={}
lapack_mkl_info={}
blas_mkl_info={}
atlas_blas_info={}
def get_info(name):
    """Look up a build-time system_info dict by name.

    Accepts either the full variable name (e.g. "lapack_opt_info") or the
    short form without the "_info" suffix; returns {} when unknown.
    """
    module_globals = globals()
    if name in module_globals:
        return module_globals[name]
    return module_globals.get(name + "_info", {})
def show():
    """Pretty-print every system_info result dictionary in this module."""
    for name, info_dict in globals().items():
        # Only public module-level plain dicts are build-info entries.
        if name[0] == "_" or type(info_dict) is not dict:
            continue
        print(name + ":")
        if not info_dict:
            print("  NOT AVAILABLE")
        for key, value in info_dict.items():
            value = str(value)
            # Source lists can be huge; show head and tail only.
            if key == "sources" and len(value) > 200:
                value = value[:60] + " ...\n... " + value[-60:]
            print("    %s = %s" % (key, value))
| gpl-3.0 |
alanswanson/webserver | qa/180-RuleExists2.py | 8 | 1074 | from base import *
# Two files placed under the docroot for this QA check:
#   FILE1 is served by the plain "file" handler (rule 1800),
#   FILE2 is executed through the "cgi" handler (rule 1801).
FILE1 = 'special_file_for_180'
FILE2 = 'foobar180_file'

MAGIC = 'Alvaro: http://www.alobbs.com/'
FORBIDDEN = 'This is forbidden string'

# Cherokee configuration fragment: two final "exists" rules, one per file.
CONF = """
vserver!1!rule!1800!match = exists
vserver!1!rule!1800!match!exists = %s
vserver!1!rule!1800!match!final = 1
vserver!1!rule!1800!handler = file
vserver!1!rule!1801!match = exists
vserver!1!rule!1801!match!exists = %s
vserver!1!rule!1801!match!final = 1
vserver!1!rule!1801!handler = cgi
""" % (FILE1, FILE2)

# Shell CGI template; %s is replaced with the body the test expects back.
CGI_BASE = """#!/bin/sh
echo "Content-type: text/html"
echo ""
cat << EOF
%s
EOF
"""
class Test (TestBase):
    """QA check: an 'exists' rule must match FILE2 and run it as CGI.

    The response must never leak the script source (the forbidden strings
    below), proving rule 1801 dispatched the request to the cgi handler.
    """
    def __init__ (self):
        TestBase.__init__ (self, __file__)
        self.name = "Rule Exists: match 2"
        self.request = "GET /%s HTTP/1.0\r\n" % (FILE2)
        self.forbidden_content = ['/bin/sh', 'echo', FORBIDDEN]
        self.expected_error = 200
        self.conf = CONF

    def Prepare (self, www):
        # 0555 is a Python 2 octal literal (r-xr-xr-x): CGI must be executable.
        self.WriteFile (www, FILE1, 0555, FORBIDDEN)
        self.WriteFile (www, FILE2, 0555, CGI_BASE % (MAGIC))
| gpl-2.0 |
Tomtomgo/phantomjs | src/qt/qtwebkit/Tools/BuildSlaveSupport/build.webkit.org-config/committer_auth_unittest.py | 123 | 9892 | #!/usr/bin/env python
#
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import __builtin__
import buildbot.status.web.auth
import contextlib
import os
import unittest
from committer_auth import CommitterAuth
# This subclass of StringIO supports the context manager protocol so it works
# with "with" statements, just like real files.
class CMStringIO(StringIO.StringIO):
    """StringIO with context-manager support.

    Python 2's StringIO.StringIO lacks __enter__/__exit__; this subclass
    adds them so in-memory fixtures behave like real files in `with` blocks.
    """
    def __enter__(self):
        return self

    def __exit__(self, exception, value, traceback):
        # Mirror file semantics: close on exit, never suppress exceptions.
        self.close()
@contextlib.contextmanager
def open_override(func):
    """Temporarily replace the builtin open() with *func* for the with-body.

    The original implementation restored the builtin only on the success
    path: an exception raised inside the with-block left the fake open()
    installed for every subsequent test.  try/finally guarantees restoration.
    """
    original_open = __builtin__.open
    __builtin__.open = func
    try:
        yield
    finally:
        __builtin__.open = original_open
class CommitterAuthTest(unittest.TestCase):
    """Unit tests for CommitterAuth.

    All file access is stubbed: the open_* hooks on the auth object are
    replaced with methods returning in-memory CMStringIO fixtures (see the
    fake_* / invalid_* methods at the bottom), so no real auth.json,
    committers, or htdigest files are read.
    """
    def setUp(self):
        self.auth = CommitterAuth('path/to/auth.json')
        self.auth.open_auth_json_file = self.fake_auth_json_file
        self.auth.open_webkit_committers_file = self.fake_committers_file
        self.auth.open_trac_credentials_file = self.fake_htdigest_file

    def fake_open_function(self, expected_filename):
        # Returns a stand-in for builtin open() that records that it was
        # called and asserts which filename the code under test opened.
        def fake_open(name, mode='r'):
            self.fake_open_was_called = True
            self.assertEqual(expected_filename, name)
        return fake_open

    def test_authentication_success(self):
        self.assertTrue(self.auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('', self.auth.errmsg())
        self.assertTrue(self.auth.authenticate('committer2@example.com', 'committer2password'))
        self.assertEqual('', self.auth.errmsg())

    def test_committer_without_trac_credentials_fails(self):
        # committer3 is in the committers list but has no htdigest entry.
        self.assertFalse(self.auth.authenticate('committer3@webkit.org', 'committer3password'))
        self.assertEqual('Invalid username/password', self.auth.errmsg())

    def test_fail_to_open_auth_json_file(self):
        def raise_IOError():
            raise IOError(2, 'No such file or directory', 'path/to/auth.json')
        auth = CommitterAuth('path/to/auth.json')
        auth.open_auth_json_file = raise_IOError
        self.assertFalse(auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('Error opening auth.json file: No such file or directory', auth.errmsg())

    def test_fail_to_open_trac_credentials_file(self):
        def raise_IOError():
            raise IOError(2, 'No such file or directory', 'path/to/trac/credentials')
        self.auth.open_trac_credentials_file = raise_IOError
        self.assertFalse(self.auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('Error opening Trac credentials file: No such file or directory', self.auth.errmsg())

    def test_fail_to_open_webkit_committers_file(self):
        def raise_IOError():
            raise IOError(2, 'No such file or directory', 'path/to/webkit/committers')
        self.auth.open_webkit_committers_file = raise_IOError
        self.assertFalse(self.auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('Error opening WebKit committers file: No such file or directory', self.auth.errmsg())

    def test_implements_IAuth(self):
        self.assertTrue(buildbot.status.web.auth.IAuth.implementedBy(CommitterAuth))

    def test_invalid_auth_json_file(self):
        auth = CommitterAuth('path/to/auth.json')
        auth.open_auth_json_file = self.invalid_auth_json_file
        self.assertFalse(auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('Error parsing auth.json file: No JSON object could be decoded', auth.errmsg())

    def test_invalid_committers_file(self):
        self.auth.open_webkit_committers_file = self.invalid_committers_file
        self.assertFalse(self.auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('Error parsing WebKit committers file', self.auth.errmsg())

    def test_invalid_trac_credentials_file(self):
        self.auth.open_trac_credentials_file = self.invalid_htdigest_file
        self.assertFalse(self.auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('Error parsing Trac credentials file', self.auth.errmsg())

    def test_missing_auth_json_keys(self):
        auth = CommitterAuth('path/to/auth.json')
        auth.open_auth_json_file = lambda: CMStringIO('{ "trac_credentials": "path/to/trac/credentials" }')
        self.assertFalse(auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('auth.json file is missing "webkit_committers" key', auth.errmsg())
        auth.open_auth_json_file = lambda: CMStringIO('{ "webkit_committers": "path/to/webkit/committers" }')
        auth.open_webkit_committers_file = self.fake_committers_file
        self.assertFalse(auth.authenticate('committer@webkit.org', 'committerpassword'))
        self.assertEqual('auth.json file is missing "trac_credentials" key', auth.errmsg())

    def test_open_auth_json_file(self):
        auth = CommitterAuth('path/to/auth.json')
        self.fake_open_was_called = False
        with open_override(self.fake_open_function(auth.auth_json_filename())):
            auth.open_auth_json_file()
        self.assertTrue(self.fake_open_was_called)

    def test_open_trac_credentials_file(self):
        auth = CommitterAuth('path/to/auth.json')
        auth.trac_credentials_filename = lambda: 'trac credentials filename'
        self.fake_open_was_called = False
        with open_override(self.fake_open_function(auth.trac_credentials_filename())):
            auth.open_trac_credentials_file()
        self.assertTrue(self.fake_open_was_called)

    def test_open_webkit_committers_file(self):
        auth = CommitterAuth('path/to/auth.json')
        auth.webkit_committers_filename = lambda: 'webkit committers filename'
        self.fake_open_was_called = False
        with open_override(self.fake_open_function(auth.webkit_committers_filename())):
            auth.open_webkit_committers_file()
        self.assertTrue(self.fake_open_was_called)

    def test_non_committer_fails(self):
        # Has valid htdigest credentials but is not in the committers group.
        self.assertFalse(self.auth.authenticate('noncommitter@example.com', 'noncommitterpassword'))
        self.assertEqual('Invalid username/password', self.auth.errmsg())

    def test_trac_credentials_filename(self):
        self.assertEqual('path/to/trac/credentials', self.auth.trac_credentials_filename())

    def test_unknown_user_fails(self):
        self.assertFalse(self.auth.authenticate('nobody@example.com', 'nobodypassword'))
        self.assertEqual('Invalid username/password', self.auth.errmsg())

    def test_username_is_prefix_of_valid_user(self):
        self.assertFalse(self.auth.authenticate('committer@webkit.orgg', 'committerpassword'))
        self.assertEqual('Invalid username/password', self.auth.errmsg())

    def test_webkit_committers(self):
        self.assertEqual(['committer@webkit.org', 'committer2@example.com', 'committer3@webkit.org'], self.auth.webkit_committers())

    def test_webkit_committers_filename(self):
        self.assertEqual('path/to/webkit/committers', self.auth.webkit_committers_filename())

    def test_wrong_password_fails(self):
        self.assertFalse(self.auth.authenticate('committer@webkit.org', 'wrongpassword'))
        self.assertEqual('Invalid username/password', self.auth.errmsg())

    # ---- fixtures -------------------------------------------------------
    def fake_auth_json_file(self):
        return CMStringIO("""{
    "trac_credentials": "path/to/trac/credentials",
    "webkit_committers": "path/to/webkit/committers"
}""")

    def invalid_auth_json_file(self):
        return CMStringIO('~!@#$%^&*()_+')

    def fake_committers_file(self):
        # SVN authz-style file; only the "webkit" group matters here.
        return CMStringIO("""[groups]
group1 = user@example.com,user2@example.com
group2 = user3@example.com
group3 =
group4 =
webkit = committer@webkit.org,committer2@example.com,committer3@webkit.org
[service:/]
* = r
""")

    def invalid_committers_file(self):
        return CMStringIO("""[groups]
[[groups2]
""")

    def fake_htdigest_file(self):
        # user:realm:md5(user:realm:password) triples.
        return CMStringIO("""committer@webkit.org:Mac OS Forge:761c8dcb7d9b5908007ed142f62fe73a
committer2@example.com:Mac OS Forge:faeee69acc2e49af3a0dbb15bd593ef4
noncommitter@example.com:Mac OS Forge:b99aa7ad32306a654ca4d57839fde9c1
""")

    def invalid_htdigest_file(self):
        # Last line has too many fields for the user:realm:hash format.
        return CMStringIO("""committer@webkit.org:Mac OS Forge:761c8dcb7d9b5908007ed142f62fe73a
committer2@example.com:Mac OS Forge:faeee69acc2e49af3a0dbb15bd593ef4
noncommitter@example.com:Mac OS Forge:b99aa7ad32306a654ca4d57839fde9c1
committer4@example.com:Mac OS Forge:::
""")
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
kastriothaliti/techstitution | venv/lib/python2.7/site-packages/werkzeug/contrib/atom.py | 259 | 15588 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
    """Render one Atom "text construct" element (title, summary, rights...)."""
    if content_type == 'xhtml':
        # XHTML payloads are embedded unescaped inside a namespaced <div>.
        template = u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n'
        return template % (name, XHTML_NAMESPACE, content, name)
    if not content_type:
        return u'<%s>%s</%s>\n' % (name, escape(content), name)
    return u'<%s type="%s">%s</%s>\n' % (name, content_type,
                                         escape(content), name)
def format_iso8601(obj):
    """Serialize a datetime to ISO 8601 for Atom timestamps.

    Aware datetimes already carry their offset in isoformat(); naive ones
    are taken to be UTC and get an explicit 'Z' suffix appended.
    """
    if obj.tzinfo:
        return obj.isoformat()
    return obj.isoformat() + 'Z'
@implements_to_string
class AtomFeed(object):

    """A helper class that creates Atom feeds.

    :param title: the title of the feed. Required.
    :param title_type: the type attribute for the title element.  One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed.  Must be an URI.  If
               not present the `feed_url` is used, but one of both is
               required.
    :param updated: the time the feed was modified the last time.  Must
                    be a :class:`datetime.datetime` object.  If not
                    present the latest entry's `updated` is used.
                    Treated as UTC if naive datetime.
    :param feed_url: the URL to the feed.  Should be the URL that was
                     requested.
    :param author: the author of the feed.  Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional).  Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if not every entry has an
                   author element.
    :param icon: an icon for the feed.
    :param logo: a logo for the feed.
    :param rights: copyright information for the feed.
    :param rights_type: the type attribute for the rights element.  One of
                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
                        ``'text'``.
    :param subtitle: a short description of the feed.
    :param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'``, ``'text'``
                          or ``'xhtml'``.  Default is ``'text'``.
    :param links: additional links.  Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param generator: the software that generated this feed.  This must be
                      a tuple in the form ``(name, url, version)``.  If
                      you don't want to specify one of them, set the item
                      to `None`.
    :param entries: a list with the entries for the feed. Entries can also
                    be added later with :meth:`add`.

    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/

    Everywhere where a list is demanded, any iterable can be used.
    """

    default_generator = ('Werkzeug', None, None)

    def __init__(self, title=None, entries=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.url = kwargs.get('url')
        self.feed_url = kwargs.get('feed_url', self.url)
        self.id = kwargs.get('id', self.feed_url)
        self.updated = kwargs.get('updated')
        self.author = kwargs.get('author', ())
        self.icon = kwargs.get('icon')
        self.logo = kwargs.get('logo')
        self.rights = kwargs.get('rights')
        self.rights_type = kwargs.get('rights_type')
        self.subtitle = kwargs.get('subtitle')
        self.subtitle_type = kwargs.get('subtitle_type', 'text')
        self.generator = kwargs.get('generator')
        if self.generator is None:
            self.generator = self.default_generator
        self.links = kwargs.get('links', [])
        self.entries = entries and list(entries) or []

        # Normalize author: a single string or dict becomes a one-element
        # list, and bare strings become {'name': ...} dicts.
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, string_types + (dict,)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}

        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        for author in self.author:
            if 'name' not in author:
                raise TypeError('author must contain at least a name')

    def add(self, *args, **kwargs):
        """Add a new entry to the feed.  This function can either be called
        with a :class:`FeedEntry` or some keyword and positional arguments
        that are forwarded to the :class:`FeedEntry` constructor.
        """
        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
            self.entries.append(args[0])
        else:
            kwargs['feed_url'] = self.feed_url
            self.entries.append(FeedEntry(*args, **kwargs))

    def __repr__(self):
        return '<%s %r (%d entries)>' % (
            self.__class__.__name__,
            self.title,
            len(self.entries)
        )

    def generate(self):
        """Return a generator that yields pieces of XML."""
        # atom demands either an author element in every entry or a global one
        if not self.author:
            if False in map(lambda e: bool(e.author), self.entries):
                self.author = ({'name': 'Unknown author'},)

        # Feed-level 'updated' defaults to the newest entry's timestamp.
        if not self.updated:
            dates = sorted([entry.updated for entry in self.entries])
            self.updated = dates and dates[-1] or datetime.utcnow()

        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
        yield '  ' + _make_text_block('title', self.title, self.title_type)
        yield u'  <id>%s</id>\n' % escape(self.id)
        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        if self.feed_url:
            yield u'  <link href="%s" rel="self" />\n' % \
                escape(self.feed_url)
        for link in self.links:
            yield u'  <link %s/>\n' % ''.join('%s="%s" ' %
                (k, escape(link[k])) for k in link)
        for author in self.author:
            yield u'  <author>\n'
            yield u'    <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield '    <email>%s</email>\n' % escape(author['email'])
            yield '  </author>\n'
        if self.subtitle:
            yield '  ' + _make_text_block('subtitle', self.subtitle,
                                          self.subtitle_type)
        if self.icon:
            yield u'  <icon>%s</icon>\n' % escape(self.icon)
        if self.logo:
            yield u'  <logo>%s</logo>\n' % escape(self.logo)
        if self.rights:
            yield '  ' + _make_text_block('rights', self.rights,
                                          self.rights_type)
        generator_name, generator_url, generator_version = self.generator
        if generator_name or generator_url or generator_version:
            tmp = [u'  <generator']
            if generator_url:
                tmp.append(u' uri="%s"' % escape(generator_url))
            if generator_version:
                tmp.append(u' version="%s"' % escape(generator_version))
            tmp.append(u'>%s</generator>\n' % escape(generator_name))
            yield u''.join(tmp)
        for entry in self.entries:
            # Entries render themselves; indent each of their lines by two
            # spaces to nest them under <feed>.
            for line in entry.generate():
                yield u'  ' + line
        yield u'</feed>\n'

    def to_string(self):
        """Convert the feed into a string."""
        return u''.join(self.generate())

    def get_response(self):
        """Return a response object for the feed."""
        return BaseResponse(self.to_string(), mimetype='application/atom+xml')

    def __call__(self, environ, start_response):
        """Use the class as WSGI response object."""
        return self.get_response()(environ, start_response)

    def __str__(self):
        return self.to_string()
@implements_to_string
class FeedEntry(object):
    """Represents a single entry in a feed.
    :param title: the title of the entry. Required.
    :param title_type: the type attribute for the title element. One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param content: the content of the entry.
    :param content_type: the type attribute for the content element. One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param summary: a summary of the entry's content.
    :param summary_type: the type attribute for the summary element. One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the entry.
    :param id: a globally unique id for the entry. Must be an URI. If
               not present the URL is used, but one of both is required.
    :param updated: the time the entry was modified the last time. Must
                    be a :class:`datetime.datetime` object. Treated as
                    UTC if naive datetime. Required.
    :param author: the author of the entry. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if the feed does not have an
                   author element.
    :param published: the time the entry was initially published. Must
                      be a :class:`datetime.datetime` object. Treated as
                      UTC if naive datetime.
    :param rights: copyright information for the entry.
    :param rights_type: the type attribute for the rights element. One of
                        ``'html'``, ``'text'`` or ``'xhtml'``. Default is
                        ``'text'``.
    :param links: additional links. Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param categories: categories for the entry. Must be a list of dictionaries
                       with term (required), scheme and label (all optional)
    :param xml_base: The xml base (url) for this feed item. If not provided
                     it will default to the item url.
    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/
    Everywhere where a list is demanded, any iterable can be used.
    """
    def __init__(self, title=None, content=None, feed_url=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.content = content
        self.content_type = kwargs.get('content_type', 'html')
        self.url = kwargs.get('url')
        # The entry id falls back to the URL; one of the two is mandatory.
        self.id = kwargs.get('id', self.url)
        self.updated = kwargs.get('updated')
        self.summary = kwargs.get('summary')
        self.summary_type = kwargs.get('summary_type', 'html')
        self.author = kwargs.get('author', ())
        self.published = kwargs.get('published')
        self.rights = kwargs.get('rights')
        self.links = kwargs.get('links', [])
        self.categories = kwargs.get('categories', [])
        # xml:base defaults to the *feed* url, not the entry url.
        self.xml_base = kwargs.get('xml_base', feed_url)
        # Normalize `author` into a list of dicts: a single string or dict
        # is first wrapped in a list, then every non-dict element becomes
        # {'name': element}.
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, string_types + (dict,)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}
        # Fail fast on the three required fields.
        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        if not self.updated:
            raise ValueError('updated is required')
    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.title
        )
    def generate(self):
        """Yields pieces of ATOM XML."""
        base = ''
        if self.xml_base:
            base = ' xml:base="%s"' % escape(self.xml_base)
        yield u'<entry%s>\n' % base
        yield u'  ' + _make_text_block('title', self.title, self.title_type)
        yield u'  <id>%s</id>\n' % escape(self.id)
        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.published:
            yield u'  <published>%s</published>\n' % \
                format_iso8601(self.published)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        # One <author> element per normalized author dict; uri/email are
        # only emitted when present.
        for author in self.author:
            yield u'  <author>\n'
            yield u'    <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield u'    <email>%s</email>\n' % escape(author['email'])
            yield u'  </author>\n'
        # Links and categories are self-closing elements whose attributes
        # come straight from the provided dicts (all values escaped).
        for link in self.links:
            yield u'  <link %s/>\n' % ''.join('%s="%s" ' %
                (k, escape(link[k])) for k in link)
        for category in self.categories:
            yield u'  <category %s/>\n' % ''.join('%s="%s" ' %
                (k, escape(category[k])) for k in category)
        if self.summary:
            yield u'  ' + _make_text_block('summary', self.summary,
                                           self.summary_type)
        if self.content:
            yield u'  ' + _make_text_block('content', self.content,
                                           self.content_type)
        yield u'</entry>\n'
    def to_string(self):
        """Convert the feed item into a unicode object."""
        return u''.join(self.generate())
    def __str__(self):
        return self.to_string()
| gpl-3.0 |
jordanemedlock/psychtruths | temboo/Library/SendGrid/NewsletterAPI/Lists/RenameRecipientList.py | 5 | 4167 | # -*- coding: utf-8 -*-
###############################################################################
#
# RenameRecipientList
# Rename a recipient list.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RenameRecipientList(Choreography):
    """Choreo wrapper for SendGrid's Newsletter API "rename recipient list" call."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the RenameRecipientList Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        choreo_uri = '/Library/SendGrid/NewsletterAPI/Lists/RenameRecipientList'
        super(RenameRecipientList, self).__init__(temboo_session, choreo_uri)
    def new_input_set(self):
        # Fresh, empty input container for one run of this choreo.
        return RenameRecipientListInputSet()
    def _make_result_set(self, result, path):
        # Wrap the raw execution result in the choreo-specific result set.
        return RenameRecipientListResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Handle used to track an in-flight execution of this choreo.
        return RenameRecipientListChoreographyExecution(session, exec_id, path)
class RenameRecipientListInputSet(InputSet):
    """
    Inputs accepted by the RenameRecipientList Choreo.  Each setter stores
    one named input value to be sent with the execution request.
    """
    def set_APIKey(self, value):
        """(required, string) The API Key obtained from SendGrid."""
        super(RenameRecipientListInputSet, self)._set_input('APIKey', value)
    def set_APIUser(self, value):
        """(required, string) The username registered with SendGrid."""
        super(RenameRecipientListInputSet, self)._set_input('APIUser', value)
    def set_List(self, value):
        """(required, string) The current name of the Recipient List to rename."""
        super(RenameRecipientListInputSet, self)._set_input('List', value)
    def set_NewList(self, value):
        """(required, string) The new name for the Recipient List."""
        super(RenameRecipientListInputSet, self)._set_input('NewList', value)
    def set_ResponseFormat(self, value):
        """(optional, string) Response format from SendGrid, json or xml; defaults to json."""
        super(RenameRecipientListInputSet, self)._set_input('ResponseFormat', value)
class RenameRecipientListResultSet(ResultSet):
    """
    Results produced by a RenameRecipientList execution.  Use the getters
    below to retrieve individual outputs.
    """
    def getJSONFromString(self, str):
        # NOTE: parameter is named `str` by the code generator for API
        # compatibility, even though it shadows the builtin.
        return json.loads(str)
    def get_Response(self):
        """The response from SendGrid; its format matches the ResponseFormat input (default json)."""
        return self._output.get('Response', None)
class RenameRecipientListChoreographyExecution(ChoreographyExecution):
    """Tracks a running RenameRecipientList execution and wraps its results."""
    def _make_result_set(self, response, path):
        return RenameRecipientListResultSet(response, path)
| apache-2.0 |
DBuildService/atomic-reactor | tests/utils/test_cachito.py | 1 | 11889 | """
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from atomic_reactor.utils.cachito import (
CachitoAPI, CachitoAPIInvalidRequest, CachitoAPIRequestTimeout, CachitoAPIUnsuccessfulRequest)
from requests.exceptions import HTTPError
import flexmock
import pytest
import responses
import json
import os.path
import re
import time
from datetime import datetime
from textwrap import dedent
# Base URL of the (mocked) Cachito service used throughout these tests.
CACHITO_URL = 'http://cachito.example.com'
# Request id that all mocked endpoints consistently report.
CACHITO_REQUEST_ID = 123
CACHITO_REQUEST_DOWNLOAD_URL = \
    '{}/api/v1/requests/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID)
# Git ref and repository the canned requests claim to fetch.
CACHITO_REQUEST_REF = 'e1be527f39ec31323f0454f7d1422c6260b00580'
CACHITO_REQUEST_REPO = 'https://github.com/release-engineering/retrodep.git'
@responses.activate
@pytest.mark.parametrize('additional_params', (
    {},
    {'flags': ['spam', 'bacon']},
    {'pkg_managers': ['gomod']},
    {'pkg_managers': []},
    {'pkg_managers': None},
    {'user': 'ham'},
    # ('eample.com' is a deliberate-looking typo in the fixture data; kept
    # as-is because only round-tripping matters here.)
    {'dependency_replacements': [{
        'name': 'eample.com/repo/project',
        'type': 'gomod',
        'version': '1.1.1',
        }]
     },
    {'packages': {'npm': [{'path': 'client'}]}},
    {'packages': None},
))
def test_request_sources(additional_params, caplog):
    """request_sources() POSTs repo/ref plus optional params and logs the reply."""
    response_data = {'id': CACHITO_REQUEST_ID}
    def handle_request_sources(http_request):
        # Inspect the JSON body Cachito would receive: repo/ref are always
        # present; each optional param is included only when not None.
        body_json = json.loads(http_request.body)
        assert body_json['repo'] == CACHITO_REQUEST_REPO
        assert body_json['ref'] == CACHITO_REQUEST_REF
        for key, value in additional_params.items():
            if value is not None:
                assert body_json[key] == value
            else:
                assert key not in body_json
        return (201, {}, json.dumps(response_data))
    responses.add_callback(
        responses.POST,
        '{}/api/v1/requests'.format(CACHITO_URL),
        content_type='application/json',
        callback=handle_request_sources)
    api = CachitoAPI(CACHITO_URL)
    response = api.request_sources(CACHITO_REQUEST_REPO, CACHITO_REQUEST_REF, **additional_params)
    assert response['id'] == CACHITO_REQUEST_ID
    # The full server response must have been logged.
    response_json = 'Cachito response:\n{}'.format(json.dumps(response_data, indent=4))
    # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
    assert re.sub(r'\s+', " ", response_json) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize(('status_code', 'error', 'error_body'), (
    (400, CachitoAPIInvalidRequest, json.dumps({'error': 'read the docs, please'})),
    (500, HTTPError, 'Internal Server Error'),
))
def test_request_sources_error(status_code, error, error_body, caplog):
    """HTTP errors from the POST raise the matching exception and are logged."""
    responses.add(
        responses.POST,
        '{}/api/v1/requests'.format(CACHITO_URL),
        content_type='application/json',
        body=error_body,
        status=status_code,
    )
    with pytest.raises(error):
        CachitoAPI(CACHITO_URL).request_sources(CACHITO_REQUEST_REPO, CACHITO_REQUEST_REF)
    try:
        response_data = json.loads(error_body)
    except ValueError:  # json.JSONDecodeError in py3
        # Non-JSON bodies (e.g. a plain 500 page) are not echoed to the log.
        assert 'Cachito response' not in caplog.text
    else:
        # JSON error bodies are pretty-printed into the log.
        response_json = 'Cachito response:\n{}'.format(json.dumps(response_data, indent=4))
        # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
        assert re.sub(r'\s+', " ", response_json) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('burst_params', (
    {'burst_retry': 0.01, 'burst_length': 0.5, 'slow_retry': 0.2},
    # Set the burst_length to lower than burst_retry to trigger the slow_retry :)
    {'burst_retry': 0.01, 'burst_length': 0.001, 'slow_retry': 0.01},
))
@pytest.mark.parametrize('cachito_request', (
    CACHITO_REQUEST_ID,
    {'id': CACHITO_REQUEST_ID},
))
def test_wait_for_request(burst_params, cachito_request, caplog):
    """Polling stops as soon as the request reaches the `complete` state."""
    # Each GET pops the next state: two in-progress polls, then completion.
    states = ['in_progress', 'in_progress', 'complete']
    updated = datetime.utcnow().isoformat()
    expected_total_responses_calls = len(states)
    expected_final_state = states[-1]
    def handle_wait_for_request(http_request):
        state = states.pop(0)
        return (200, {}, json.dumps({'id': CACHITO_REQUEST_ID, 'state': state, 'updated': updated}))
    request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
    responses.add_callback(
        responses.GET,
        request_url,
        content_type='application/json',
        callback=handle_wait_for_request)
    response = CachitoAPI(CACHITO_URL).wait_for_request(cachito_request, **burst_params)
    assert response['id'] == CACHITO_REQUEST_ID
    assert response['state'] == expected_final_state
    # One HTTP call per canned state -- no extra polling after completion.
    assert len(responses.calls) == expected_total_responses_calls
    expect_in_logs = dedent(
        """\
        Request {} is complete
        Request url: {}
        """
    ).format(CACHITO_REQUEST_ID, request_url)
    # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
    assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', r" ", caplog.text)
@responses.activate
@pytest.mark.parametrize('timeout', (0, 60))
def test_wait_for_request_timeout(timeout, caplog):
    """A request that never leaves `in_progress` raises CachitoAPIRequestTimeout."""
    request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
    updated = datetime.utcnow().isoformat()
    response_data = {'id': CACHITO_REQUEST_ID, 'state': 'in_progress', 'updated': updated}
    responses.add(
        responses.GET,
        request_url,
        content_type='application/json',
        status=200,
        body=json.dumps(response_data),
    )
    # NOTE(review): the mocked clock yields 2000 then 1000 on successive
    # time.time() calls, driving the elapsed-time bookkeeping past any
    # `timeout` -- confirm against CachitoAPI.wait_for_request's internals.
    flexmock(time).should_receive('time').and_return(2000, 1000).one_by_one()
    # Hit the timeout during bursting to make the test faster
    burst_params = {'burst_retry': 0.001, 'burst_length': 0.02}
    with pytest.raises(CachitoAPIRequestTimeout):
        api = CachitoAPI(CACHITO_URL, timeout=timeout)
        api.wait_for_request(CACHITO_REQUEST_ID, **burst_params)
    # The timeout message must include the last (still in-progress) response.
    in_progress_response_json = json.dumps(response_data, indent=4)
    expect_in_logs = dedent(
        """\
        Request {} not completed after {} seconds of not being updated
        Details: {}
        """
    ).format(request_url, timeout, in_progress_response_json)
    # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
    assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('error_state,error_reason',
                         [('failed', 'Cloning the Git repository failed'),
                          ('stale', 'The request has expired')])
def test_wait_for_unsuccessful_request(error_state, error_reason, caplog):
    """Terminal failure states raise CachitoAPIUnsuccessfulRequest and log details."""
    states = ['in_progress', 'in_progress', error_state]
    updated = datetime.utcnow().isoformat()
    expected_total_responses_calls = len(states)
    def handle_wait_for_request(http_request):
        # Serve the next canned state on each poll; the last one is terminal.
        state = states.pop(0)
        return (200, {}, json.dumps({'state_reason': error_reason,
                                     'repo': CACHITO_REQUEST_REPO,
                                     'state': state,
                                     'ref': CACHITO_REQUEST_REF,
                                     'id': CACHITO_REQUEST_ID,
                                     'updated': updated,
                                     }))
    responses.add_callback(
        responses.GET,
        '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID),
        content_type='application/json',
        callback=handle_wait_for_request)
    burst_params = {'burst_retry': 0.001, 'burst_length': 0.5}
    with pytest.raises(CachitoAPIUnsuccessfulRequest):
        CachitoAPI(CACHITO_URL).wait_for_request(CACHITO_REQUEST_ID, **burst_params)
    assert len(responses.calls) == expected_total_responses_calls
    # The log must contain the full JSON of the final (failed/stale) response.
    failed_response_json = json.dumps(
        {'state_reason': error_reason,
         'repo': CACHITO_REQUEST_REPO,
         'state': error_state,
         'ref': CACHITO_REQUEST_REF,
         'id': CACHITO_REQUEST_ID,
         'updated': updated,
         },
        indent=4
    )
    expect_in_logs = dedent(
        """\
        Request {} is in "{}" state: {}
        Details: {}
        """
    ).format(CACHITO_REQUEST_ID, error_state, error_reason, failed_response_json)
    # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
    assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('error_state,error_reason',
                         [('failed', 'Cloning the Git repository failed'),
                          ('stale', 'The request has expired')])
def test_check_CachitoAPIUnsuccessfulRequest_text(error_state, error_reason, caplog):
    """The exception message itself names the state, reason, repo and ref."""
    # NOTE(review): the `caplog` fixture is unused here -- candidate for removal.
    states = ['in_progress', 'in_progress', error_state]
    updated = datetime.utcnow().isoformat()
    expected_total_responses_calls = len(states)
    cachito_request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
    def handle_wait_for_request(http_request):
        state = states.pop(0)
        return (200, {}, json.dumps({'state_reason': error_reason,
                                     'repo': CACHITO_REQUEST_REPO,
                                     'state': state,
                                     'ref': CACHITO_REQUEST_REF,
                                     'id': CACHITO_REQUEST_ID,
                                     'updated': updated,
                                     }))
    responses.add_callback(
        responses.GET,
        '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID),
        content_type='application/json',
        callback=handle_wait_for_request)
    burst_params = {'burst_retry': 0.001, 'burst_length': 0.5}
    expected_exc_text = dedent('''\
        Cachito request is in "{}" state, reason: {}
        Request {} ({}) tried to get repo '{}' at reference '{}'.
        '''.format(error_state, error_reason, CACHITO_REQUEST_ID,
                   cachito_request_url, CACHITO_REQUEST_REPO,
                   CACHITO_REQUEST_REF))
    with pytest.raises(CachitoAPIUnsuccessfulRequest) as excinfo:
        CachitoAPI(CACHITO_URL).wait_for_request(CACHITO_REQUEST_ID, **burst_params)
    assert len(responses.calls) == expected_total_responses_calls
    assert expected_exc_text in str(excinfo.value)
def test_wait_for_request_bad_request_type():
    """A request handle that is neither an id nor a dict is rejected."""
    api = CachitoAPI(CACHITO_URL)
    with pytest.raises(ValueError, match=r'Unexpected request type'):
        api.wait_for_request('spam')
@responses.activate
@pytest.mark.parametrize('cachito_request', (
    CACHITO_REQUEST_ID,
    {'id': CACHITO_REQUEST_ID},
))
def test_download_sources(tmpdir, cachito_request):
    """The archive body is written to <dest_dir>/remote-source.tar.gz verbatim."""
    blob = 'glop-glop-I\'m-a-blob'
    expected_dest_path = os.path.join(str(tmpdir), 'remote-source.tar.gz')
    responses.add(
        responses.GET,
        '{}/api/v1/requests/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID),
        body=blob)
    dest_path = CachitoAPI(CACHITO_URL).download_sources(cachito_request, str(tmpdir))
    # The returned path points at the downloaded file and its content matches.
    assert dest_path == expected_dest_path
    with open(dest_path) as f:
        assert f.read() == blob
def test_download_sources_bad_request_type(tmpdir):
    """A non-id, non-dict request handle is rejected before any download."""
    api = CachitoAPI(CACHITO_URL)
    with pytest.raises(ValueError, match=r'Unexpected request type'):
        api.download_sources('spam', str(tmpdir))
@pytest.mark.parametrize('cachito_request', (
    CACHITO_REQUEST_ID,
    {'id': CACHITO_REQUEST_ID},
))
def test_assemble_download_url(tmpdir, cachito_request):
    """Both id and dict request handles resolve to the same download URL."""
    api = CachitoAPI(CACHITO_URL)
    assert api.assemble_download_url(cachito_request) == CACHITO_REQUEST_DOWNLOAD_URL
| bsd-3-clause |
mylene-campana/hpp-rbprm-corba | src/hpp/corbaserver/rbprm/tools/generateROMs.py | 4 | 2394 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.gepetto import Viewer
# ROS packages holding the HRP-2 robot description (meshes + urdf/srdf).
packageName = "hrp2_14_description"
meshPackageName = "hrp2_14_description"
rootJointType = "freeflyer"
##
#  Information to retrieve urdf and srdf files.
urdfName = "hrp2_14"
urdfSuffix = "_reduced"
srdfSuffix = ""
# Load the full-body model and bound the root translation (x, y, z) to the
# workspace of interest.
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.setJointBounds ("base_joint_xyz", [-1,1, -4, -1, 1, 2.2])
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
nbSamples = 50000  # number of limb configurations sampled per limb database
ps = ProblemSolver( fullBody )
#~ AFTER loading obstacles
# Right leg: contact patch under the foot; normal along local +y.
rLegId = '7rLeg'
rLeg = 'RLEG_JOINT0'
rLegOffset = [0,-0.105,0]
rLegNormal = [0,1,0]
rLegx = 0.09; rLegy = 0.05
fullBody.addLimb(rLegId,rLeg,'',rLegOffset,rLegNormal, rLegx, rLegy, nbSamples, 0.01)
# Left leg: mirror of the right leg.
lLegId = '8lLeg'
lLeg = 'LLEG_JOINT0'
lLegOffset = [0,-0.105,0]
lLegNormal = [0,1,0]
lLegx = 0.09; lLegy = 0.05
# FIX: previously passed rLegNormal here although lLegNormal was defined just
# above (both are [0,1,0], so sampling results are unchanged).
fullBody.addLimb(lLegId,lLeg,'',lLegOffset,lLegNormal, lLegx, lLegy, nbSamples, 0.01)
# Right arm: effector frame on the wrist joint (RARM_JOINT5).
rarmId = '3Rarm'
rarm = 'RARM_JOINT0'
rHand = 'RARM_JOINT5'
rArmOffset = [-0.05,-0.050,-0.050]
rArmNormal = [1,0,0]
rArmx = 0.024; rArmy = 0.024
fullBody.addLimb(rarmId,rarm,rHand,rArmOffset,rArmNormal, rArmx, rArmy, nbSamples, 0.01)
#~ AFTER loading obstacles
# Left arm: mirror of the right arm.
larmId = '4Larm'
larm = 'LARM_JOINT0'
lHand = 'LARM_JOINT5'
lArmOffset = [-0.05,-0.050,-0.050]
lArmNormal = [1,0,0]
lArmx = 0.024; lArmy = 0.024
fullBody.addLimb(larmId,larm,lHand,lArmOffset,lArmNormal, lArmx, lArmy, nbSamples, 0.01)
q_0 = fullBody.getCurrentConfig ()  # kept for interactive inspection
limit = nbSamples-1
def _export_rom(limbId, path):
    """Dump the x,y,z end-effector position of each sample of `limbId`
    to `path`, one comma-separated line per sample."""
    f1 = open(path, 'w+')
    try:
        for i in range(0,limit):
            q = fullBody.getSamplePosition(limbId,i)
            f1.write(str(q[0]) + "," + str(q[1]) + "," + str(q[2]) + "\n")
    finally:
        # Close even if a corba call fails mid-export (the original
        # copy-pasted loops leaked the handle on error).
        f1.close()
# One ROM point-cloud file per limb database created above.
for _limbId, _path in ((rLegId, './data/roms/rleg.erom'),
                       (lLegId, './data/roms/lleg.erom'),
                       (rarmId, './data/roms/rarm.erom'),
                       (larmId, './data/roms/larm.erom')):
    _export_rom(_limbId, _path)
| lgpl-3.0 |
christoph-buente/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/encoder.py | 484 | 25695 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a cStringIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
from google.protobuf.internal import wire_format
# Positive and negative IEEE-754 infinity sentinels.  The literal below
# overflows and thus becomes "infinity".  We would use "float('inf')" but
# it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
  """Returns the number of bytes required to serialize a tag with this field
  number."""
  # A tag is the field number combined with a 3-bit wire type, then
  # varint-encoded; the wire-type bits never change the encoded length.
  # Just pass in type 0, since the type won't affect the tag+type size.
  return _VarintSize(wire_format.PackTag(field_number, 0))
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
  """A sizer which uses the function compute_value_size to compute the size of
  each value.  Typically compute_value_size is _VarintSize."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      # Packed: one tag, a varint byte count, then the raw values.
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        result = 0
        for element in value:
          result += compute_value_size(element)
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # Repeated (unpacked): every element carries its own tag.
      def RepeatedFieldSize(value):
        result = tag_size * len(value)
        for element in value:
          result += compute_value_size(element)
        return result
      return RepeatedFieldSize
    else:
      # Singular: one tag plus one value.
      def FieldSize(value):
        return tag_size + compute_value_size(value)
      return FieldSize
  return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
  """Like SimpleSizer, but modify_value is invoked on each value before it is
  passed to compute_value_size.  modify_value is typically ZigZagEncode."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      # Packed: one tag, a varint byte count, then the transformed values.
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        result = 0
        for element in value:
          result += compute_value_size(modify_value(element))
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # Repeated (unpacked): every element carries its own tag.
      def RepeatedFieldSize(value):
        result = tag_size * len(value)
        for element in value:
          result += compute_value_size(modify_value(element))
        return result
      return RepeatedFieldSize
    else:
      # Singular: one tag plus one transformed value.
      def FieldSize(value):
        return tag_size + compute_value_size(modify_value(value))
      return FieldSize
  return SpecificSizer
def _FixedSizer(value_size):
  """Like _SimpleSizer except for a fixed-size field.  The input is the size
  of one value."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        # All elements share the same width, so the payload is a product.
        result = len(value) * value_size
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # Per-element cost (tag + value) is constant; precompute it.
      element_size = value_size + tag_size
      def RepeatedFieldSize(value):
        return len(value) * element_size
      return RepeatedFieldSize
    else:
      field_size = value_size + tag_size
      def FieldSize(value):
        # The value itself is ignored: the size never varies.
        return field_size
      return FieldSize
  return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
# Sizer constructors keyed by field type.  Signed varint types share the
# signed size function; unsigned ones share the plain varint size function.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)
UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)
# sint32/sint64 values are ZigZag-transformed before varint sizing.
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
    _SignedVarintSize, wire_format.ZigZagEncode)
# Fixed-width types: 4 bytes (32-bit/float), 8 bytes (64-bit/double), 1 (bool).
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)
BoolSizer = _FixedSizer(1)
def StringSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a string field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  assert not is_packed  # strings are length-delimited; packing doesn't apply
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Size is measured on the UTF-8 encoding, not the character count.
        l = local_len(element.encode('utf-8'))
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = local_len(value.encode('utf-8'))
      return tag_size + local_VarintSize(l) + l
    return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a bytes field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  assert not is_packed  # bytes fields are length-delimited; never packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Unlike strings, no encoding step: the raw length is used directly.
        l = local_len(element)
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = local_len(value)
      return tag_size + local_VarintSize(l) + l
    return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a group field."""
  # Groups are bracketed by a start tag and an end tag, hence two tags.
  tag_size = _TagSize(field_number) * 2
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        result += element.ByteSize()
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      return tag_size + value.ByteSize()
    return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a message field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Submessages are length-delimited: varint length prefix + payload.
        l = element.ByteSize()
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = value.ByteSize()
      return tag_size + local_VarintSize(l) + l
    return FieldSize
# --------------------------------------------------------------------
# MessageSet is special.
def MessageSetItemSizer(field_number):
  """Returns a sizer for extensions of MessageSet.
  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Everything except the nested message's length prefix and payload is a
  # constant: the Item group's two tags, the type_id tag + value, and the
  # message field's tag.
  static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
                 _TagSize(3))
  local_VarintSize = _VarintSize
  def FieldSize(value):
    l = value.ByteSize()
    return static_size + local_VarintSize(l) + l
  return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
local_chr = chr
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeVarint
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
local_chr = chr
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeSignedVarint
# Shared encoder closures used by the startup helpers (_VarintBytes) and the
# per-field encoders below.
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
  """Encode the given integer as a varint and return the bytes.  This is only
  called at startup time so it doesn't need to be fast."""
  chunks = []
  _EncodeVarint(chunks.append, value)
  return "".join(chunks)
def TagBytes(field_number, wire_type):
  """Encode the given tag and return the bytes.  Only called at startup."""
  # PackTag combines the field number and wire type into the single integer
  # that forms the varint-encoded tag.
  return _VarintBytes(wire_format.PackTag(field_number, wire_type))
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
  """Return a constructor for an encoder for fields of a particular type.
  Args:
      wire_type:  The field's wire type, for encoding tags.
      encode_value:  A function which encodes an individual value, e.g.
        _EncodeVarint().
      compute_value_size:  A function which computes the size of an individual
        value, e.g. _VarintSize().
  """
  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        # One tag, then the total payload size, then the raw values.
        write(tag_bytes)
        size = 0
        for element in value:
          size += compute_value_size(element)
        local_EncodeVarint(write, size)
        for element in value:
          encode_value(write, element)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        # Unpacked: every element is preceded by its own tag.
        for element in value:
          write(tag_bytes)
          encode_value(write, element)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        # Returning the last call's result saves two bytecode ops (see the
        # module docstring for this deliberate micro-optimization).
        return encode_value(write, value)
      return EncodeField
  return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
  """Like SimpleEncoder but additionally invokes modify_value on every value
  before passing it to encode_value.  Usually modify_value is ZigZagEncode.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      encode_value:  A function which encodes an individual (modified) value.
      compute_value_size:  A function which computes the size of an individual
        (modified) value.
      modify_value:  A transformation applied to each value before sizing and
        encoding, e.g. wire_format.ZigZagEncode.
  """
  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        # The total payload length must be written before any value bytes.
        size = 0
        for element in value:
          size += compute_value_size(modify_value(element))
        local_EncodeVarint(write, size)
        for element in value:
          encode_value(write, modify_value(element))
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          encode_value(write, modify_value(element))
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        return encode_value(write, modify_value(value))
      return EncodeField
  return SpecificEncoder
def _StructPackEncoder(wire_type, format):
  """Return a constructor for an encoder for a fixed-width field.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      format:  The format string to pass to struct.pack().
  """
  value_size = struct.calcsize(format)
  def SpecificEncoder(field_number, is_repeated, is_packed):
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        # Fixed-width values: total payload size is simply count * width.
        local_EncodeVarint(write, len(value) * value_size)
        for element in value:
          write(local_struct_pack(format, element))
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          write(local_struct_pack(format, element))
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        return write(local_struct_pack(format, value))
      return EncodeField
  return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
  """Return a constructor for an encoder for float fields.

  This is like StructPackEncoder, but catches errors that may be due to
  passing non-finite floating-point values to struct.pack, and makes a
  second attempt to encode those values.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      format:  The format string to pass to struct.pack().
  """
  value_size = struct.calcsize(format)
  if value_size == 4:
    # Hard-coded IEEE-754 single-precision bit patterns for the special
    # values struct.pack may refuse to encode.
    def EncodeNonFiniteOrRaise(write, value):
      # Remember that the serialized form uses little-endian byte order.
      if value == _POS_INF:
        write('\x00\x00\x80\x7F')
      elif value == _NEG_INF:
        write('\x00\x00\x80\xFF')
      elif value != value:          # NaN (the only value unequal to itself)
        write('\x00\x00\xC0\x7F')
      else:
        # The value was finite after all: re-raise the caught SystemError.
        raise
  elif value_size == 8:
    # IEEE-754 double-precision bit patterns for the same special values.
    def EncodeNonFiniteOrRaise(write, value):
      if value == _POS_INF:
        write('\x00\x00\x00\x00\x00\x00\xF0\x7F')
      elif value == _NEG_INF:
        write('\x00\x00\x00\x00\x00\x00\xF0\xFF')
      elif value != value:          # NaN
        write('\x00\x00\x00\x00\x00\x00\xF8\x7F')
      else:
        raise
  else:
    raise ValueError('Can\'t encode floating-point values that are '
                     '%d bytes long (only 4 or 8)' % value_size)
  def SpecificEncoder(field_number, is_repeated, is_packed):
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value):
        write(tag_bytes)
        local_EncodeVarint(write, len(value) * value_size)
        for element in value:
          # This try/except block is going to be faster than any code that
          # we could write to check whether element is finite.
          # NOTE(review): SystemError is presumably what struct.pack raised
          # for non-finite floats on the interpreters this targeted —
          # confirm for the supported Python versions.
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value):
        for element in value:
          write(tag_bytes)
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value):
        write(tag_bytes)
        try:
          write(local_struct_pack(format, value))
        except SystemError:
          EncodeNonFiniteOrRaise(write, value)
      return EncodeField
  return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.
# int32/int64/enum are written as sign-extended varints.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
# sint32/sint64 are ZigZag-encoded before being written as varints.
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
    wire_format.ZigZagEncode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
# Floats use the dedicated encoder so non-finite values are still encodable.
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
def BoolEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a boolean field.

  Booleans serialize to a single varint byte: 0 or 1.
  """
  false_byte = chr(0)
  true_byte = chr(1)
  if is_packed:
    tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
    local_EncodeVarint = _EncodeVarint
    def EncodePackedField(write, value):
      # The payload length equals the element count, since every boolean
      # encodes to exactly one byte.
      write(tag_bytes)
      local_EncodeVarint(write, len(value))
      for element in value:
        write(true_byte if element else false_byte)
    return EncodePackedField
  if is_repeated:
    tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
    def EncodeRepeatedField(write, value):
      for element in value:
        write(tag_bytes)
        write(true_byte if element else false_byte)
    return EncodeRepeatedField
  tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
  def EncodeField(write, value):
    write(tag_bytes)
    return write(true_byte if value else false_byte)
  return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a string field.

  Text is serialized as UTF-8 bytes preceded by a varint byte length.
  """
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  local_EncodeVarint = _EncodeVarint
  local_len = len
  assert not is_packed
  if is_repeated:
    def EncodeRepeatedField(write, value):
      for element in value:
        # Encode before touching the stream so a codec error leaves the
        # output untouched for this element.
        utf8_bytes = element.encode('utf-8')
        write(tag)
        local_EncodeVarint(write, local_len(utf8_bytes))
        write(utf8_bytes)
    return EncodeRepeatedField
  def EncodeField(write, value):
    utf8_bytes = value.encode('utf-8')
    write(tag)
    local_EncodeVarint(write, local_len(utf8_bytes))
    return write(utf8_bytes)
  return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a bytes field.

  Raw bytes are written as-is, preceded by a varint byte length.
  """
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  local_EncodeVarint = _EncodeVarint
  local_len = len
  assert not is_packed
  if is_repeated:
    def EncodeRepeatedField(write, value):
      for chunk in value:
        write(tag)
        local_EncodeVarint(write, local_len(chunk))
        write(chunk)
    return EncodeRepeatedField
  def EncodeField(write, value):
    write(tag)
    local_EncodeVarint(write, local_len(value))
    return write(value)
  return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a group field.

  Groups are framed by START_GROUP/END_GROUP tags rather than a length
  prefix; the body is produced by the message's own _InternalSerialize().
  """
  start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
  end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
  assert not is_packed
  if is_repeated:
    def EncodeRepeatedField(write, value):
      for element in value:
        write(start_tag)
        element._InternalSerialize(write)
        write(end_tag)
    return EncodeRepeatedField
  else:
    def EncodeField(write, value):
      write(start_tag)
      value._InternalSerialize(write)
      return write(end_tag)
    return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a message field.

  Sub-messages are length-delimited: a varint of the message's ByteSize()
  precedes the bytes emitted by its _InternalSerialize().
  """
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  local_EncodeVarint = _EncodeVarint
  assert not is_packed
  if is_repeated:
    def EncodeRepeatedField(write, value):
      for element in value:
        write(tag)
        local_EncodeVarint(write, element.ByteSize())
        element._InternalSerialize(write)
    return EncodeRepeatedField
  else:
    def EncodeField(write, value):
      write(tag)
      local_EncodeVarint(write, value.ByteSize())
      return value._InternalSerialize(write)
    return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
  """Encoder for extensions of MessageSet.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Everything up to the message payload is constant for a given extension,
  # so it is concatenated once at construction time.
  start_bytes = "".join([
      TagBytes(1, wire_format.WIRETYPE_START_GROUP),
      TagBytes(2, wire_format.WIRETYPE_VARINT),
      _VarintBytes(field_number),
      TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
  end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
  local_EncodeVarint = _EncodeVarint
  def EncodeField(write, value):
    write(start_bytes)
    local_EncodeVarint(write, value.ByteSize())
    value._InternalSerialize(write)
    return write(end_bytes)
  return EncodeField
| bsd-3-clause |
sunlianqiang/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_parameters.py | 80 | 6196 | import unittest, sys
from ctypes.test import need_symbol
class SimpleTypesTestCase(unittest.TestCase):
    """Exercise ``from_param`` argument conversion for ctypes' simple types
    (c_char_p, c_wchar_p, pointers, arrays and custom adapters)."""

    def setUp(self):
        import ctypes
        try:
            from _ctypes import set_conversion_mode
        except ImportError:
            # Builds without the private helper simply skip the pinning.
            pass
        else:
            # Pin a deterministic unicode conversion mode; restored in tearDown.
            self.prev_conv_mode = set_conversion_mode("ascii", "strict")

    def tearDown(self):
        try:
            from _ctypes import set_conversion_mode
        except ImportError:
            pass
        else:
            # Restore whatever conversion mode was active before setUp ran.
            set_conversion_mode(*self.prev_conv_mode)

    def test_subclasses(self):
        from ctypes import c_void_p, c_char_p
        # ctypes 0.9.5 and before did overwrite from_param in SimpleType_new
        class CVOIDP(c_void_p):
            def from_param(cls, value):
                return value * 2
            from_param = classmethod(from_param)

        class CCHARP(c_char_p):
            def from_param(cls, value):
                return value * 4
            from_param = classmethod(from_param)

        self.assertEqual(CVOIDP.from_param("abc"), "abcabc")
        self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc")

    @need_symbol('c_wchar_p')
    def test_subclasses_c_wchar_p(self):
        from ctypes import c_wchar_p

        class CWCHARP(c_wchar_p):
            def from_param(cls, value):
                return value * 3
            from_param = classmethod(from_param)

        self.assertEqual(CWCHARP.from_param("abc"), "abcabcabc")

    # XXX Replace by c_char_p tests
    def test_cstrings(self):
        from ctypes import c_char_p, byref
        # c_char_p.from_param on a Python String packs the string
        # into a cparam object
        s = b"123"
        self.assertIs(c_char_p.from_param(s)._obj, s)
        # new in 0.9.1: convert (encode) unicode to ascii
        self.assertEqual(c_char_p.from_param(b"123")._obj, b"123")
        self.assertRaises(TypeError, c_char_p.from_param, "123\377")
        self.assertRaises(TypeError, c_char_p.from_param, 42)
        # calling c_char_p.from_param with a c_char_p instance
        # returns the argument itself:
        a = c_char_p(b"123")
        self.assertIs(c_char_p.from_param(a), a)

    @need_symbol('c_wchar_p')
    def test_cw_strings(self):
        from ctypes import byref, c_wchar_p
        c_wchar_p.from_param("123")
        self.assertRaises(TypeError, c_wchar_p.from_param, 42)
        self.assertRaises(TypeError, c_wchar_p.from_param, b"123\377")
        pa = c_wchar_p.from_param(c_wchar_p("123"))
        self.assertEqual(type(pa), c_wchar_p)

    def test_int_pointers(self):
        from ctypes import c_short, c_uint, c_int, c_long, POINTER, pointer
        LPINT = POINTER(c_int)
        ## p = pointer(c_int(42))
        ## x = LPINT.from_param(p)
        x = LPINT.from_param(pointer(c_int(42)))
        self.assertEqual(x.contents.value, 42)
        self.assertEqual(LPINT(c_int(42)).contents.value, 42)
        self.assertEqual(LPINT.from_param(None), None)
        # Pointers to differently-sized/signed ints must be rejected.
        if c_int != c_long:
            self.assertRaises(TypeError, LPINT.from_param, pointer(c_long(42)))
        self.assertRaises(TypeError, LPINT.from_param, pointer(c_uint(42)))
        self.assertRaises(TypeError, LPINT.from_param, pointer(c_short(42)))

    def test_byref_pointer(self):
        # The from_param class method of POINTER(typ) classes accepts what is
        # returned by byref(obj), it type(obj) == typ
        from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
        LPINT = POINTER(c_int)
        LPINT.from_param(byref(c_int(42)))
        self.assertRaises(TypeError, LPINT.from_param, byref(c_short(22)))
        if c_int != c_long:
            self.assertRaises(TypeError, LPINT.from_param, byref(c_long(22)))
        self.assertRaises(TypeError, LPINT.from_param, byref(c_uint(22)))

    def test_byref_pointerpointer(self):
        # See above
        from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
        LPLPINT = POINTER(POINTER(c_int))
        LPLPINT.from_param(byref(pointer(c_int(42))))
        self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_short(22))))
        if c_int != c_long:
            self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_long(22))))
        self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_uint(22))))

    def test_array_pointers(self):
        from ctypes import c_short, c_uint, c_int, c_long, POINTER
        INTARRAY = c_int * 3
        ia = INTARRAY()
        self.assertEqual(len(ia), 3)
        self.assertEqual([ia[i] for i in range(3)], [0, 0, 0])
        # Pointers are only compatible with arrays containing items of
        # the same type!
        LPINT = POINTER(c_int)
        LPINT.from_param((c_int*3)())
        self.assertRaises(TypeError, LPINT.from_param, c_short*3)
        self.assertRaises(TypeError, LPINT.from_param, c_long*3)
        self.assertRaises(TypeError, LPINT.from_param, c_uint*3)

    def test_noctypes_argtype(self):
        import _ctypes_test
        from ctypes import CDLL, c_void_p, ArgumentError
        func = CDLL(_ctypes_test.__file__)._testfunc_p_p
        func.restype = c_void_p
        # TypeError: has no from_param method
        self.assertRaises(TypeError, setattr, func, "argtypes", (object,))

        class Adapter(object):
            def from_param(cls, obj):
                return None

        func.argtypes = (Adapter(),)
        self.assertEqual(func(None), None)
        self.assertEqual(func(object()), None)

        class Adapter(object):
            def from_param(cls, obj):
                return obj

        func.argtypes = (Adapter(),)
        # don't know how to convert parameter 1
        self.assertRaises(ArgumentError, func, object())
        self.assertEqual(func(c_void_p(42)), 42)

        class Adapter(object):
            def from_param(cls, obj):
                raise ValueError(obj)

        func.argtypes = (Adapter(),)
        # ArgumentError: argument 1: ValueError: 99
        self.assertRaises(ArgumentError, func, 99)
################################################################
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| lgpl-3.0 |
artwr/airflow | airflow/contrib/plugins/metastore_browser/main.py | 3 | 6084 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_appbuilder import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www.decorators import gzipped
# Airflow connection ids used by the browser views below.
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
# Optional allow/deny lists of database names for the table selector endpoint.
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
# Cap on how many tables the selector endpoint may return.
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a Flask-AppBuilder BaseView
class MetastoreBrowserView(BaseView):
    """Read-only browser over the Hive metastore: databases, tables,
    partitions, sample rows (via Presto) and table DDL (via the Hive CLI)."""
    default_view = 'index'

    @expose('/')
    def index(self):
        """Render the list of databases with their object counts."""
        # NOTE(review): the trailing .format(**locals()) is a no-op here (the
        # string contains no placeholders) — presumably left over; confirm
        # before removing.
        sql = """
        SELECT
            a.name as db, db_location_uri as location,
            count(1) as object_count, a.desc as description
        FROM DBS a
        JOIN TBLS b ON a.DB_ID = b.DB_ID
        GROUP BY a.name, db_location_uri, a.desc
        """.format(**locals())
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        df = h.get_pandas_df(sql)
        # Turn each database name into a link to its detail page.
        df.db = (
            '<a href="/metastorebrowserview/db/?db=' +
            df.db + '">' + df.db + '</a>')
        table = df.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            escape=False,
            na_rep='',)
        return self.render_template(
            "metastore_browser/dbs.html", table=table)

    @expose('/table/')
    def table(self):
        """Render metadata for a single table (?table=<name>)."""
        table_name = request.args.get("table")
        m = HiveMetastoreHook(METASTORE_CONN_ID)
        table = m.get_table(table_name)
        return self.render_template(
            "metastore_browser/table.html",
            table=table, table_name=table_name, datetime=datetime, int=int)

    @expose('/db/')
    def db(self):
        """Render the tables of one database (?db=<name>)."""
        db = request.args.get("db")
        m = HiveMetastoreHook(METASTORE_CONN_ID)
        tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
        return self.render_template(
            "metastore_browser/db.html", tables=tables, db=db)

    @gzipped
    @expose('/partitions/')
    def partitions(self):
        """Render the partition list for ?table=<schema>.<table>."""
        schema, table = request.args.get("table").split('.')
        # NOTE(review): request-supplied identifiers are interpolated into the
        # SQL via .format() — this is not parameterized and is an injection
        # risk if the endpoint is exposed; worth hardening.
        sql = """
        SELECT
            a.PART_NAME,
            a.CREATE_TIME,
            c.LOCATION,
            c.IS_COMPRESSED,
            c.INPUT_FORMAT,
            c.OUTPUT_FORMAT
        FROM PARTITIONS a
        JOIN TBLS b ON a.TBL_ID = b.TBL_ID
        JOIN DBS d ON b.DB_ID = d.DB_ID
        JOIN SDS c ON a.SD_ID = c.SD_ID
        WHERE
            b.TBL_NAME like '{table}' AND
            d.NAME like '{schema}'
        ORDER BY PART_NAME DESC
        """.format(**locals())
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        df = h.get_pandas_df(sql)
        return df.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            na_rep='',)

    @gzipped
    @expose('/objects/')
    def objects(self):
        """Return a JSON list of selectable tables for the table picker."""
        where_clause = ''
        if DB_WHITELIST:
            dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
            where_clause = "AND b.name IN ({})".format(dbs)
        if DB_BLACKLIST:
            # NOTE(review): when both lists are set, the blacklist clause
            # overwrites the whitelist clause — confirm that precedence is
            # intended.
            dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
            where_clause = "AND b.name NOT IN ({})".format(dbs)
        sql = """
        SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
        FROM TBLS a
        JOIN DBS b ON a.DB_ID = b.DB_ID
        WHERE
            a.TBL_NAME NOT LIKE '%tmp%' AND
            a.TBL_NAME NOT LIKE '%temp%' AND
            b.NAME NOT LIKE '%tmp%' AND
            b.NAME NOT LIKE '%temp%'
        {where_clause}
        LIMIT {LIMIT};
        """.format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        d = [
            {'id': row[0], 'text': row[0]}
            for row in h.get_records(sql)]
        return json.dumps(d)

    @gzipped
    @expose('/data/')
    def data(self):
        """Render up to 1000 sample rows of ?table=<name> via Presto."""
        table = request.args.get("table")
        # NOTE(review): the table name flows straight from the request into
        # the query string — SQL injection risk; worth hardening.
        sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
        h = PrestoHook(PRESTO_CONN_ID)
        df = h.get_pandas_df(sql)
        return df.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            na_rep='',)

    @expose('/ddl/')
    def ddl(self):
        """Return the Hive DDL of ?table=<name> via the Hive CLI."""
        table = request.args.get("table")
        sql = "SHOW CREATE TABLE {table};".format(table=table)
        h = HiveCliHook(HIVE_CLI_CONN_ID)
        return h.run_cli(sql)
# Creating a flask blueprint to integrate the templates and static folder
# shipped with this plugin into the Airflow webserver.
bp = Blueprint(
    "metastore_browser", __name__,
    template_folder='templates',
    static_folder='static',
    static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
    """Airflow plugin wiring the blueprint and browser view into the
    webserver menu under Plugins -> Hive Metadata Browser."""
    name = "metastore_browser"
    flask_blueprints = [bp]
    appbuilder_views = [{"name": "Hive Metadata Browser",
                         "category": "Plugins",
                         "view": MetastoreBrowserView()}]
| apache-2.0 |
ubic135/odoo-design | addons/account/wizard/account_use_model.py | 341 | 3361 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_use_model(osv.osv_memory):
    """Wizard that generates journal entries from selected account models."""
    _name = 'account.use.model'
    _description = 'Use model'
    _columns = {
        # Models whose lines will be turned into journal entries.
        'model': fields.many2many('account.model', 'account_use_model_relation', 'account_id', 'model_id', 'Account Model'),
    }

    def view_init(self, cr, uid, fields_list, context=None):
        """Validate the selected models before the wizard form is shown.

        Raises an error when a model line whose maturity date is based on
        the partner payment term has no partner set.
        """
        account_model_obj = self.pool.get('account.model')
        if context is None:
            context = {}
        if context.get('active_ids',False):
            data_model = account_model_obj.browse(cr, uid, context['active_ids'])
            for model in data_model:
                for line in model.lines_id:
                    if line.date_maturity == 'partner':
                        if not line.partner_id:
                            raise osv.except_osv(_('Error!'), _("Maturity date of entry line generated by model line '%s' is based on partner payment term!"\
                                "\nPlease define partner on it!")%line.name)
        pass

    def create_entries(self, cr, uid, ids, context=None):
        """Generate account moves from the chosen models and return an action
        opening a list view restricted to the newly created entries."""
        account_model_obj = self.pool.get('account.model')
        mod_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        data = self.read(cr, uid, ids, context=context)[0]
        # When launched from a model line, use the wizard's selection;
        # otherwise fall back to the records the wizard was opened on.
        record_id = context and context.get('model_line', False) or False
        if record_id:
            model_ids = data['model']
        else:
            model_ids = context['active_ids']
        move_ids = account_model_obj.generate(cr, uid, model_ids, context=context)
        context = dict(context, move_ids=move_ids)
        model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','view_move_form')], context=context)
        resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        return {
            'domain': "[('id','in', ["+','.join(map(str,context['move_ids']))+"])]",
            'name': 'Entries',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.move',
            'views': [(False,'tree'),(resource_id,'form')],
            'type': 'ir.actions.act_window',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
suncycheng/intellij-community | python/lib/Lib/site-packages/django/core/serializers/__init__.py | 74 | 3535 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_query_set)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv" : "path.to.csv.serializer",
"txt" : "path.to.txt.serializer",
}
"""
from django.conf import settings
from django.utils import importlib
# Built-in serializers
BUILTIN_SERIALIZERS = {
    "xml" : "django.core.serializers.xml_serializer",
    "python" : "django.core.serializers.python",
    "json" : "django.core.serializers.json",
}

# Check for PyYaml and register the serializer if it's available.
try:
    import yaml
    BUILTIN_SERIALIZERS["yaml"] = "django.core.serializers.pyyaml"
except ImportError:
    pass

# Lazily-populated registry mapping format name -> serializer module;
# filled by _load_serializers() on first use.
_serializers = {}
def register_serializer(format, serializer_module, serializers=None):
    """Register a new serializer.

    ``serializer_module`` should be the fully qualified module name
    for the serializer.

    If ``serializers`` is provided, the registration is recorded in that
    dictionary; otherwise it goes straight into the process-wide registry.
    Registering directly into the global register is not thread-safe.
    """
    module = importlib.import_module(serializer_module)
    target = _serializers if serializers is None else serializers
    target[format] = module
def unregister_serializer(format):
    """Unregister a given serializer. This is not a thread-safe operation."""
    _serializers.pop(format)
def get_serializer(format):
    """Return the Serializer class registered under *format*."""
    if not _serializers:
        _load_serializers()
    return _serializers[format].Serializer

def get_serializer_formats():
    """Return the names of every registered serialization format."""
    if not _serializers:
        _load_serializers()
    return list(_serializers)

def get_public_serializer_formats():
    """Return format names intended for public (end-user) use."""
    if not _serializers:
        _load_serializers()
    return [fmt for fmt, module in _serializers.iteritems()
            if not module.Serializer.internal_use_only]

def get_deserializer(format):
    """Return the Deserializer callable registered under *format*."""
    if not _serializers:
        _load_serializers()
    return _serializers[format].Deserializer
def serialize(format, queryset, **options):
    """Serialize a queryset (or any iterator that returns database objects)
    using a certain serializer and return the serialized output."""
    serializer = get_serializer(format)()
    serializer.serialize(queryset, **options)
    return serializer.getvalue()
def deserialize(format, stream_or_string, **options):
    """Deserialize a stream or a string.

    Returns an iterator that yields ``(obj, m2m_relation_dict)``, where
    ``obj`` is an instantiated -- but *unsaved* -- object, and
    ``m2m_relation_dict`` maps ``m2m_field_name`` to a list of related
    objects.
    """
    deserializer = get_deserializer(format)
    return deserializer(stream_or_string, **options)
def _load_serializers():
    """Register built-in and settings-defined serializers.

    Done lazily so user code has a chance to (e.g.) set up custom settings
    without needing to be careful of import order.
    """
    global _serializers
    registry = {}
    for fmt in BUILTIN_SERIALIZERS:
        register_serializer(fmt, BUILTIN_SERIALIZERS[fmt], registry)
    if hasattr(settings, "SERIALIZATION_MODULES"):
        for fmt in settings.SERIALIZATION_MODULES:
            register_serializer(fmt, settings.SERIALIZATION_MODULES[fmt], registry)
    # Swap in the fully built registry in a single assignment.
    _serializers = registry
| apache-2.0 |
hackaugusto/raiden | raiden/tests/integration/network/proxies/test_token_network_registry.py | 1 | 4120 | from unittest.mock import patch
import pytest
from eth_utils import is_same_address, to_canonical_address
from raiden.constants import (
RED_EYES_PER_CHANNEL_PARTICIPANT_LIMIT,
RED_EYES_PER_TOKEN_NETWORK_LIMIT,
)
from raiden.exceptions import AddressWithoutCode, InvalidToken, RaidenRecoverableError
from raiden.network.proxies.token import Token
from raiden.network.proxies.token_network_registry import TokenNetworkRegistry
from raiden.tests.utils.factories import make_address
from raiden.tests.utils.smartcontracts import deploy_token
from raiden_contracts.constants import TEST_SETTLE_TIMEOUT_MAX, TEST_SETTLE_TIMEOUT_MIN
def test_token_network_registry(deploy_client, contract_manager, token_network_registry_address):
    """Exercise the TokenNetworkRegistry proxy end to end.

    Covers the settlement-timeout getters, rejection of non-contract and
    non-ERC20 token addresses, successful registration (including the emitted
    event), recoverable double registration and get_token_network() lookups.
    """
    registry_address = to_canonical_address(token_network_registry_address)
    token_network_registry_proxy = TokenNetworkRegistry(
        jsonrpc_client=deploy_client,
        registry_address=registry_address,
        contract_manager=contract_manager,
    )
    assert token_network_registry_proxy.settlement_timeout_min() == TEST_SETTLE_TIMEOUT_MIN
    assert token_network_registry_proxy.settlement_timeout_max() == TEST_SETTLE_TIMEOUT_MAX

    # Registering an address with no deployed code must be rejected.
    bad_token_address = make_address()
    with pytest.raises(AddressWithoutCode):
        token_network_registry_proxy.add_token_with_limits(
            token_address=bad_token_address,
            channel_participant_deposit_limit=RED_EYES_PER_CHANNEL_PARTICIPANT_LIMIT,
            token_network_deposit_limit=RED_EYES_PER_TOKEN_NETWORK_LIMIT,
        )

    # Deploy a real ERC20 token to register.
    test_token = deploy_token(
        deploy_client=deploy_client,
        contract_manager=contract_manager,
        initial_amount=1000,
        decimals=0,
        token_name="TKN",
        token_symbol="TKN",
    )
    test_token_address = to_canonical_address(test_token.contract.address)

    # A contract not following the ERC20 protocol (broken total_supply here)
    # must be rejected as well.
    with patch.object(Token, "total_supply", return_value=""):
        with pytest.raises(InvalidToken):
            token_network_registry_proxy.add_token_with_limits(
                token_address=test_token_address,
                channel_participant_deposit_limit=RED_EYES_PER_CHANNEL_PARTICIPANT_LIMIT,
                token_network_deposit_limit=RED_EYES_PER_TOKEN_NETWORK_LIMIT,
            )

    event_filter = token_network_registry_proxy.tokenadded_filter()
    token_network_address = token_network_registry_proxy.add_token_with_limits(
        token_address=test_token_address,
        channel_participant_deposit_limit=RED_EYES_PER_CHANNEL_PARTICIPANT_LIMIT,
        token_network_deposit_limit=RED_EYES_PER_TOKEN_NETWORK_LIMIT,
    )

    # Registering the same token twice is a recoverable error.
    with pytest.raises(RaidenRecoverableError) as exc:
        token_network_registry_proxy.add_token_with_limits(
            token_address=test_token_address,
            channel_participant_deposit_limit=RED_EYES_PER_CHANNEL_PARTICIPANT_LIMIT,
            token_network_deposit_limit=RED_EYES_PER_TOKEN_NETWORK_LIMIT,
        )
    # Inspect the raised exception itself: ``str(exc)`` stringifies pytest's
    # ExceptionInfo wrapper, while ``exc.value`` is the actual exception.
    assert "Token already registered" in str(exc.value)

    # Exactly one TokenNetworkCreated event must have been emitted, pointing
    # at our token and the new token network.
    logs = event_filter.get_all_entries()
    assert len(logs) == 1
    decoded_event = token_network_registry_proxy.proxy.decode_event(logs[0])
    assert is_same_address(decoded_event["args"]["token_address"], test_token.contract.address)
    assert is_same_address(decoded_event["args"]["token_network_address"], token_network_address)

    # Lookup semantics: unknown addresses (including the token network's own
    # address) yield None, a registered token yields its network address, and
    # None as input is rejected outright.
    assert token_network_registry_proxy.get_token_network(bad_token_address) is None
    assert is_same_address(
        token_network_registry_proxy.get_token_network(test_token_address), token_network_address
    )
    with pytest.raises(ValueError):
        token_network_registry_proxy.get_token_network(None)
    assert token_network_registry_proxy.get_token_network(token_network_address) is None
    assert token_network_registry_proxy.get_token_network(test_token_address) is not None
| mit |
smishenk/blink-crosswalk | Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/xhr_benchmark_handler.py | 76 | 3946 | # Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the COPYING file or at
# https://developers.google.com/open-source/licenses/bsd
from mod_pywebsocket import util
class XHRBenchmarkHandler(object):
    """Handler for XHR benchmark requests: do_send() verifies an uploaded
    body of 'a' bytes, do_receive() streams back a requested number of 'a'
    bytes, optionally with chunked transfer encoding."""

    def __init__(self, headers, rfile, wfile):
        self._logger = util.get_class_logger(self)
        self.headers = headers   # request headers (getheader() interface)
        self.rfile = rfile       # request body stream
        self.wfile = wfile       # response stream

    def do_send(self):
        """Read Content-Length bytes from the request, verify that every
        byte is 'a', and reply with the received byte count."""
        content_length = int(self.headers.getheader('Content-Length'))
        self._logger.debug('Requested to receive %s bytes', content_length)
        # Read in bounded chunks so huge uploads don't sit in memory at once.
        RECEIVE_BLOCK_SIZE = 1024 * 1024
        bytes_to_receive = content_length
        while bytes_to_receive > 0:
            bytes_to_receive_in_this_loop = bytes_to_receive
            if bytes_to_receive_in_this_loop > RECEIVE_BLOCK_SIZE:
                bytes_to_receive_in_this_loop = RECEIVE_BLOCK_SIZE
            received_data = self.rfile.read(bytes_to_receive_in_this_loop)
            for c in received_data:
                if c != 'a':
                    # Bail out silently on a corrupt body; no response is sent.
                    self._logger.debug('Request body verification failed')
                    return
            bytes_to_receive -= len(received_data)
            if bytes_to_receive < 0:
                self._logger.debug('Received %d more bytes than expected' %
                                   (-bytes_to_receive))
                return
        # Return the number of received bytes back to the client.
        response_body = '%d' % content_length
        self.wfile.write(
            'HTTP/1.1 200 OK\r\n'
            'Content-Type: text/html\r\n'
            'Content-Length: %d\r\n'
            '\r\n%s' % (len(response_body), response_body))
        self.wfile.flush()

    def do_receive(self):
        """Parse '<size> <mode>' from the request body and stream back that
        many 'a' bytes; <mode> is 'chunked' or 'none' (Content-Length)."""
        content_length = int(self.headers.getheader('Content-Length'))
        request_body = self.rfile.read(content_length)
        request_array = request_body.split(' ')
        if len(request_array) < 2:
            self._logger.debug('Malformed request body: %r', request_body)
            return
        # Parse the size parameter.
        bytes_to_send = request_array[0]
        try:
            bytes_to_send = int(bytes_to_send)
        except ValueError, e:
            self._logger.debug('Malformed size parameter: %r', bytes_to_send)
            return
        self._logger.debug('Requested to send %s bytes', bytes_to_send)
        # Parse the transfer encoding parameter.
        chunked_mode = False
        mode_parameter = request_array[1]
        if mode_parameter == 'chunked':
            self._logger.debug('Requested chunked transfer encoding')
            chunked_mode = True
        elif mode_parameter != 'none':
            self._logger.debug('Invalid mode parameter: %r', mode_parameter)
            return
        # Write a header
        response_header = (
            'HTTP/1.1 200 OK\r\n'
            'Content-Type: application/octet-stream\r\n')
        if chunked_mode:
            response_header += 'Transfer-Encoding: chunked\r\n\r\n'
        else:
            response_header += (
                'Content-Length: %d\r\n\r\n' % bytes_to_send)
        self.wfile.write(response_header)
        self.wfile.flush()
        # Write a body
        SEND_BLOCK_SIZE = 1024 * 1024
        while bytes_to_send > 0:
            bytes_to_send_in_this_loop = bytes_to_send
            if bytes_to_send_in_this_loop > SEND_BLOCK_SIZE:
                bytes_to_send_in_this_loop = SEND_BLOCK_SIZE
            if chunked_mode:
                # Each chunk is prefixed with its length in hexadecimal.
                self.wfile.write('%x\r\n' % bytes_to_send_in_this_loop)
            self.wfile.write('a' * bytes_to_send_in_this_loop)
            if chunked_mode:
                self.wfile.write('\r\n')
            self.wfile.flush()
            bytes_to_send -= bytes_to_send_in_this_loop
        if chunked_mode:
            # A zero-length chunk terminates a chunked response.
            self.wfile.write('0\r\n\r\n')
            self.wfile.flush()
| bsd-3-clause |
sekikn/incubator-airflow | airflow/contrib/hooks/__init__.py | 15 | 1053 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This package is deprecated. Please use `airflow.hooks` or `airflow.providers.*.hooks`."""
import warnings

# Emit a DeprecationWarning the moment this shim package is imported.
# stacklevel=2 attributes the warning to the importing module rather
# than to this __init__ file.
warnings.warn(
    "This package is deprecated. Please use `airflow.hooks` or `airflow.providers.*.hooks`.",
    DeprecationWarning,
    stacklevel=2,
)
| apache-2.0 |
wakermahmud/sync-engine | inbox/util/url.py | 2 | 4681 | import dns
from dns.resolver import Resolver
from dns.resolver import NoNameservers, NXDOMAIN, Timeout, NoAnswer
from urllib import urlencode
from inbox.log import get_logger
import re
log = get_logger('inbox.util.url')
from inbox.providers import providers

# http://www.regular-expressions.info/email.html
EMAIL_REGEX = re.compile(r'[A-Z0-9._%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,4}',
                         re.IGNORECASE)

# Use Google's Public DNS server (8.8.8.8)
GOOGLE_DNS_IP = '8.8.8.8'

# Shared module-level resolver: every DNS lookup in this module goes
# through Google's public DNS instead of the host's configured servers.
dns_resolver = Resolver()
dns_resolver.nameservers = [GOOGLE_DNS_IP]
class InvalidEmailAddressError(Exception):
    """Raised when a string does not look like a valid email address."""
def _fallback_get_mx_domains(domain):
    """
    Sometimes dns.resolver.Resolver fails to return what we want. See
    http://stackoverflow.com/questions/18898847. In such cases, try using
    dns.query.udp().

    Returns a list of MX rdata entries, or [] if the raw query fails or
    the answer section is empty.
    """
    try:
        query = dns.message.make_query(domain, dns.rdatatype.MX)
        answers = dns.query.udp(query, GOOGLE_DNS_IP).answer[0]
        return [a for a in answers if a.rdtype == dns.rdatatype.MX]
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.  Network errors and an empty answer
        # section (IndexError on ``answer[0]``) still fall through to [].
        return []
def get_mx_domains(domain):
    """Retrieve and return the MX records for a domain.

    Failures are logged and yield an empty result; an empty answer
    triggers a retry over raw UDP via ``_fallback_get_mx_domains``.
    """
    records = []
    try:
        records = dns_resolver.query(domain, 'MX')
    except NXDOMAIN:
        log.error('No such domain', domain=domain)
    except NoNameservers:
        log.error('NoMXservers', domain=domain)
    except Timeout:
        log.error('Time out during resolution', domain=domain)
    except NoAnswer:
        log.error('No answer from provider', domain=domain)
        records = _fallback_get_mx_domains(domain)
    # Normalise exchange names to lower case for later matching.
    return [str(record.exchange).lower() for record in records]
def mx_match(mx_domains, match_domains):
    """
    Return True if any of the `mx_domains` matches an mx_domain
    in `match_domains`.
    """
    for candidate in mx_domains:
        # Depending on how the MX server is configured, the name may be
        # absolute (trailing dot) or relative; strip the trailing dot.
        # FIXME @karim: maybe resolve the server instead.
        if candidate[-1] == '.':
            candidate = candidate[:-1]
        # Entries in match_domains are regular expressions anchored at
        # the end of the candidate domain; any hit confirms the provider.
        for pattern in match_domains:
            if re.search(pattern + '$', candidate):
                return True
    return False
def provider_from_address(email_address):
    """Return the provider name serving `email_address`, or 'unknown'.

    For each known provider, tries in order: a literal domain match,
    an MX-record pattern match, then a name-server match.
    """
    if not EMAIL_REGEX.match(email_address):
        raise InvalidEmailAddressError('Invalid email address')

    domain = email_address.split('@')[1].lower()
    mx_domains = get_mx_domains(domain)

    ns_records = []
    try:
        ns_records = dns_resolver.query(domain, 'NS')
    except NoNameservers:
        log.error('NoNameservers', domain=domain)
    except NXDOMAIN:
        log.error('No such domain', domain=domain)
    except Timeout:
        log.error('Time out during resolution', domain=domain)
    except NoAnswer:
        log.error('No answer from provider', domain=domain)

    ns_names = [str(record).lower() for record in ns_records]

    for name, info in providers.iteritems():
        # 1. The address' domain is one of the provider's known domains.
        if any(domain.endswith(d) for d in info.get('domains', [])):
            return name
        # 2. A retrieved MX host matches one of the provider's MX patterns.
        if mx_match(mx_domains, info.get('mx_servers', [])):
            return name
        # 3. A retrieved name server is listed for the provider.
        if any(ns in info.get('ns_servers', []) for ns in ns_names):
            return name
    return 'unknown'
# From tornado.httputil
def url_concat(url, args, fragments=None):
    """
    Concatenate url and argument dictionary regardless of whether
    url has existing query parameters.

    `args` is urlencoded into the query string and `fragments` (optional)
    into the URL fragment.  Trailing '#' characters on `url` are dropped.

    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    """
    if not args and not fragments:
        return url

    # Strip off trailing hashes.  rstrip also copes with an empty or
    # all-hash url, where the original ``while url[-1] == '#'`` loop
    # raised IndexError.
    url = url.rstrip('#')

    fragment_tail = ''
    if fragments:
        fragment_tail = '#' + urlencode(fragments)

    args_tail = ''
    if args:
        # Add a separator unless the url already ends with one.
        if not url or url[-1] not in ('?', '&'):
            args_tail += '&' if ('?' in url) else '?'
        args_tail += urlencode(args)

    return url + args_tail + fragment_tail
erdewit/ib_insync | ib_insync/order.py | 1 | 12269 | """Order types used by Interactive Brokers."""
from dataclasses import dataclass, field
from typing import ClassVar, List, NamedTuple, Set
from eventkit import Event
from .contract import Contract, TagValue
from .objects import Fill, SoftDollarTier, TradeLogEntry
from .util import UNSET_DOUBLE, UNSET_INTEGER, dataclassNonDefaults
# Public API of this module: one space-separated string split into a list.
__all__ = (
    'Trade Order OrderStatus OrderState OrderComboLeg '
    'LimitOrder MarketOrder StopOrder StopLimitOrder BracketOrder '
    'OrderCondition ExecutionCondition MarginCondition TimeCondition '
    'PriceCondition PercentChangeCondition VolumeCondition').split()
@dataclass
class Order:
    """
    Order for trading contracts.

    https://interactivebrokers.github.io/tws-api/available_orders.html

    Numeric fields default to the ``UNSET_DOUBLE``/``UNSET_INTEGER``
    sentinels so that "not given" can be distinguished from a real zero.
    Field names and order mirror the TWS API order attributes.
    """
    orderId: int = 0
    clientId: int = 0
    permId: int = 0
    action: str = ''
    totalQuantity: float = 0.0
    orderType: str = ''
    lmtPrice: float = UNSET_DOUBLE
    auxPrice: float = UNSET_DOUBLE
    tif: str = ''
    activeStartTime: str = ''
    activeStopTime: str = ''
    ocaGroup: str = ''
    ocaType: int = 0
    orderRef: str = ''
    transmit: bool = True
    parentId: int = 0
    blockOrder: bool = False
    sweepToFill: bool = False
    displaySize: int = 0
    triggerMethod: int = 0
    outsideRth: bool = False
    hidden: bool = False
    goodAfterTime: str = ''
    goodTillDate: str = ''
    rule80A: str = ''
    allOrNone: bool = False
    minQty: int = UNSET_INTEGER
    percentOffset: float = UNSET_DOUBLE
    overridePercentageConstraints: bool = False
    trailStopPrice: float = UNSET_DOUBLE
    trailingPercent: float = UNSET_DOUBLE
    faGroup: str = ''
    faProfile: str = ''
    faMethod: str = ''
    faPercentage: str = ''
    designatedLocation: str = ''
    openClose: str = "O"
    origin: int = 0
    shortSaleSlot: int = 0
    exemptCode: int = -1
    discretionaryAmt: float = 0.0
    eTradeOnly: bool = False
    firmQuoteOnly: bool = False
    nbboPriceCap: float = UNSET_DOUBLE
    optOutSmartRouting: bool = False
    auctionStrategy: int = 0
    startingPrice: float = UNSET_DOUBLE
    stockRefPrice: float = UNSET_DOUBLE
    delta: float = UNSET_DOUBLE
    stockRangeLower: float = UNSET_DOUBLE
    stockRangeUpper: float = UNSET_DOUBLE
    randomizePrice: bool = False
    randomizeSize: bool = False
    volatility: float = UNSET_DOUBLE
    volatilityType: int = UNSET_INTEGER
    deltaNeutralOrderType: str = ''
    deltaNeutralAuxPrice: float = UNSET_DOUBLE
    deltaNeutralConId: int = 0
    deltaNeutralSettlingFirm: str = ''
    deltaNeutralClearingAccount: str = ''
    deltaNeutralClearingIntent: str = ''
    deltaNeutralOpenClose: str = ''
    deltaNeutralShortSale: bool = False
    deltaNeutralShortSaleSlot: int = 0
    deltaNeutralDesignatedLocation: str = ''
    continuousUpdate: bool = False
    referencePriceType: int = UNSET_INTEGER
    basisPoints: float = UNSET_DOUBLE
    basisPointsType: int = UNSET_INTEGER
    scaleInitLevelSize: int = UNSET_INTEGER
    scaleSubsLevelSize: int = UNSET_INTEGER
    scalePriceIncrement: float = UNSET_DOUBLE
    scalePriceAdjustValue: float = UNSET_DOUBLE
    scalePriceAdjustInterval: int = UNSET_INTEGER
    scaleProfitOffset: float = UNSET_DOUBLE
    scaleAutoReset: bool = False
    scaleInitPosition: int = UNSET_INTEGER
    scaleInitFillQty: int = UNSET_INTEGER
    scaleRandomPercent: bool = False
    scaleTable: str = ''
    hedgeType: str = ''
    hedgeParam: str = ''
    account: str = ''
    settlingFirm: str = ''
    clearingAccount: str = ''
    clearingIntent: str = ''
    algoStrategy: str = ''
    algoParams: List[TagValue] = field(default_factory=list)
    smartComboRoutingParams: List[TagValue] = field(default_factory=list)
    algoId: str = ''
    whatIf: bool = False
    notHeld: bool = False
    solicited: bool = False
    modelCode: str = ''
    orderComboLegs: List['OrderComboLeg'] = field(default_factory=list)
    orderMiscOptions: List[TagValue] = field(default_factory=list)
    referenceContractId: int = 0
    peggedChangeAmount: float = 0.0
    isPeggedChangeAmountDecrease: bool = False
    referenceChangeAmount: float = 0.0
    referenceExchangeId: str = ''
    adjustedOrderType: str = ''
    triggerPrice: float = UNSET_DOUBLE
    adjustedStopPrice: float = UNSET_DOUBLE
    adjustedStopLimitPrice: float = UNSET_DOUBLE
    adjustedTrailingAmount: float = UNSET_DOUBLE
    adjustableTrailingUnit: int = 0
    lmtPriceOffset: float = UNSET_DOUBLE
    conditions: List['OrderCondition'] = field(default_factory=list)
    conditionsCancelOrder: bool = False
    conditionsIgnoreRth: bool = False
    extOperator: str = ''
    softDollarTier: SoftDollarTier = field(default_factory=SoftDollarTier)
    cashQty: float = UNSET_DOUBLE
    mifid2DecisionMaker: str = ''
    mifid2DecisionAlgo: str = ''
    mifid2ExecutionTrader: str = ''
    mifid2ExecutionAlgo: str = ''
    dontUseAutoPriceForHedge: bool = False
    isOmsContainer: bool = False
    discretionaryUpToLimitPrice: bool = False
    autoCancelDate: str = ''
    filledQuantity: float = UNSET_DOUBLE
    refFuturesConId: int = 0
    autoCancelParent: bool = False
    shareholder: str = ''
    imbalanceOnly: bool = False
    routeMarketableToBbo: bool = False
    parentPermId: int = 0
    usePriceMgmtAlgo: bool = False

    def __repr__(self):
        # Show only the fields that differ from their defaults.
        attrs = dataclassNonDefaults(self)
        if self.__class__ is not Order:
            # Subclasses like LimitOrder imply the order type already.
            attrs.pop('orderType', None)
        if not self.softDollarTier:
            attrs.pop('softDollarTier')
        clsName = self.__class__.__qualname__
        kwargs = ', '.join(
            f'{k}={v!r}' for k, v in attrs.items())
        return f'{clsName}({kwargs})'

    __str__ = __repr__

    def __eq__(self, other):
        # Orders compare (and hash) by identity: two orders with equal
        # field values are still distinct orders.
        return self is other

    def __hash__(self):
        return id(self)
class LimitOrder(Order):
    """Order with ``orderType='LMT'`` and the given limit price."""

    def __init__(self, action: str, totalQuantity: float, lmtPrice: float,
                 **kwargs):
        super().__init__(
            orderType='LMT', action=action,
            totalQuantity=totalQuantity, lmtPrice=lmtPrice, **kwargs)
class MarketOrder(Order):
    """Order with ``orderType='MKT'``."""

    def __init__(self, action: str, totalQuantity: float, **kwargs):
        super().__init__(
            orderType='MKT', action=action,
            totalQuantity=totalQuantity, **kwargs)
class StopOrder(Order):
    """Order with ``orderType='STP'``; ``stopPrice`` maps onto ``auxPrice``."""

    def __init__(self, action: str, totalQuantity: float, stopPrice: float,
                 **kwargs):
        super().__init__(
            orderType='STP', action=action,
            totalQuantity=totalQuantity, auxPrice=stopPrice, **kwargs)
class StopLimitOrder(Order):
    """Order with ``orderType='STP LMT'``; ``stopPrice`` maps onto ``auxPrice``."""

    def __init__(self, action: str, totalQuantity: float, lmtPrice: float,
                 stopPrice: float, **kwargs):
        super().__init__(
            orderType='STP LMT', action=action,
            totalQuantity=totalQuantity, lmtPrice=lmtPrice,
            auxPrice=stopPrice, **kwargs)
@dataclass
class OrderStatus:
    """Status information for an order, plus the status-string constants."""
    orderId: int = 0
    status: str = ''
    filled: float = 0.0
    remaining: float = 0.0
    avgFillPrice: float = 0.0
    permId: int = 0
    parentId: int = 0
    lastFillPrice: float = 0.0
    clientId: int = 0
    whyHeld: str = ''
    mktCapPrice: float = 0.0

    # Possible values of the ``status`` field.
    PendingSubmit: ClassVar[str] = 'PendingSubmit'
    PendingCancel: ClassVar[str] = 'PendingCancel'
    PreSubmitted: ClassVar[str] = 'PreSubmitted'
    Submitted: ClassVar[str] = 'Submitted'
    ApiPending: ClassVar[str] = 'ApiPending'
    ApiCancelled: ClassVar[str] = 'ApiCancelled'
    Cancelled: ClassVar[str] = 'Cancelled'
    Filled: ClassVar[str] = 'Filled'
    Inactive: ClassVar[str] = 'Inactive'

    # Terminal states vs. states still eligible for execution
    # (used by Trade.isDone / Trade.isActive).
    DoneStates: ClassVar[Set[str]] = {'Filled', 'Cancelled', 'ApiCancelled'}
    ActiveStates: ClassVar[Set[str]] = {
        'PendingSubmit', 'ApiPending', 'PreSubmitted', 'Submitted'}
@dataclass
class OrderState:
    """Margin, commission and status details for an order; the margin
    fields are kept as plain strings."""
    status: str = ''
    initMarginBefore: str = ''
    maintMarginBefore: str = ''
    equityWithLoanBefore: str = ''
    initMarginChange: str = ''
    maintMarginChange: str = ''
    equityWithLoanChange: str = ''
    initMarginAfter: str = ''
    maintMarginAfter: str = ''
    equityWithLoanAfter: str = ''
    commission: float = UNSET_DOUBLE
    minCommission: float = UNSET_DOUBLE
    maxCommission: float = UNSET_DOUBLE
    commissionCurrency: str = ''
    warningText: str = ''
    completedTime: str = ''
    completedStatus: str = ''
@dataclass
class OrderComboLeg:
    """Carries the per-leg price of a combo order."""
    price: float = UNSET_DOUBLE
@dataclass
class Trade:
    """
    Trade keeps track of an order, its status and all its fills.

    Events:
        * ``statusEvent`` (trade: :class:`.Trade`)
        * ``modifyEvent`` (trade: :class:`.Trade`)
        * ``fillEvent`` (trade: :class:`.Trade`, fill: :class:`.Fill`)
        * ``commissionReportEvent`` (trade: :class:`.Trade`,
          fill: :class:`.Fill`, commissionReport: :class:`.CommissionReport`)
        * ``filledEvent`` (trade: :class:`.Trade`)
        * ``cancelEvent`` (trade: :class:`.Trade`)
        * ``cancelledEvent`` (trade: :class:`.Trade`)
    """

    # Names of the Event attributes created in __post_init__.
    events: ClassVar = (
        'statusEvent', 'modifyEvent', 'fillEvent',
        'commissionReportEvent', 'filledEvent',
        'cancelEvent', 'cancelledEvent')

    contract: Contract = field(default_factory=Contract)
    order: Order = field(default_factory=Order)
    orderStatus: 'OrderStatus' = field(default_factory=OrderStatus)
    fills: List[Fill] = field(default_factory=list)
    log: List[TradeLogEntry] = field(default_factory=list)

    def __post_init__(self):
        # Create one Event instance per name listed in ``events``.
        self.statusEvent = Event('statusEvent')
        self.modifyEvent = Event('modifyEvent')
        self.fillEvent = Event('fillEvent')
        self.commissionReportEvent = Event('commissionReportEvent')
        self.filledEvent = Event('filledEvent')
        self.cancelEvent = Event('cancelEvent')
        self.cancelledEvent = Event('cancelledEvent')

    def isActive(self):
        """True if eligible for execution, false otherwise."""
        return self.orderStatus.status in OrderStatus.ActiveStates

    def isDone(self):
        """True if completely filled or cancelled, false otherwise."""
        return self.orderStatus.status in OrderStatus.DoneStates

    def filled(self):
        """Number of shares filled."""
        fills = self.fills
        if self.contract.secType == 'BAG':
            # don't count fills for the leg contracts
            fills = [f for f in fills if f.contract.secType == 'BAG']
        return sum(f.execution.shares for f in fills)

    def remaining(self):
        """Number of shares remaining to be filled."""
        return self.order.totalQuantity - self.filled()
class BracketOrder(NamedTuple):
    """Container holding a parent order together with its takeProfit and
    stopLoss orders."""
    parent: Order
    takeProfit: Order
    stopLoss: Order
@dataclass
class OrderCondition:
    """Base class for order conditions; provides type dispatch and
    conjunction helpers."""

    @staticmethod
    def createClass(condType):
        """Return the condition class for the given numeric type code."""
        return {
            1: PriceCondition,
            3: TimeCondition,
            4: MarginCondition,
            5: ExecutionCondition,
            6: VolumeCondition,
            7: PercentChangeCondition,
        }[condType]

    def And(self):
        # Combine with the next condition using logical AND.
        self.conjunction = 'a'
        return self

    def Or(self):
        # Combine with the next condition using logical OR.
        self.conjunction = 'o'
        return self
@dataclass
class PriceCondition(OrderCondition):
    """Price-based condition (condType 1)."""
    condType: int = 1
    conjunction: str = 'a'
    isMore: bool = True
    price: float = 0.0
    conId: int = 0
    exch: str = ''
    triggerMethod: int = 0
@dataclass
class TimeCondition(OrderCondition):
    """Time-based condition (condType 3)."""
    condType: int = 3
    conjunction: str = 'a'
    isMore: bool = True
    time: str = ''
@dataclass
class MarginCondition(OrderCondition):
    """Margin-percentage condition (condType 4)."""
    condType: int = 4
    conjunction: str = 'a'
    isMore: bool = True
    percent: int = 0
@dataclass
class ExecutionCondition(OrderCondition):
    """Execution-based condition (condType 5)."""
    condType: int = 5
    conjunction: str = 'a'
    secType: str = ''
    exch: str = ''
    symbol: str = ''
@dataclass
class VolumeCondition(OrderCondition):
    """Volume-based condition (condType 6)."""
    condType: int = 6
    conjunction: str = 'a'
    isMore: bool = True
    volume: int = 0
    conId: int = 0
    exch: str = ''
@dataclass
class PercentChangeCondition(OrderCondition):
    """Percent-change condition (condType 7)."""
    condType: int = 7
    conjunction: str = 'a'
    isMore: bool = True
    changePercent: float = 0.0
    conId: int = 0
    exch: str = ''
| bsd-2-clause |
sumedhasingla/VTK | Examples/Infovis/Python/selection.py | 18 | 3457 | #!/usr/bin/env python
from vtk import *
source = vtkRandomGraphSource()
source.DirectedOff()
source.SetNumberOfVertices(100)
source.SetEdgeProbability(0) # Basically generates a tree
source.SetUseEdgeProbability(True)
source.SetStartWithTree(True)
source.IncludeEdgeWeightsOn()
# Connect to the Boost centrality filter.
centrality = vtkBoostBrandesCentrality ()
centrality.SetInputConnection(source.GetOutputPort())
# Create force directed layout
forceStrat = vtkSimple2DLayoutStrategy()
forceStrat.SetInitialTemperature(5)
# Create circular layout
fastStrat = vtkFast2DLayoutStrategy()
# Create a graph layout view
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(centrality.GetOutputPort())
view.SetVertexLabelArrayName("vertex id")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("vertex id")
view.SetColorVertices(True)
view.SetEdgeColorArrayName("edge weight")
view.SetColorEdges(True)
view.SetLayoutStrategy(forceStrat)
# Create a second shrubery!
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(centrality.GetOutputPort())
view2.SetVertexLabelArrayName("vertex id")
view2.SetVertexLabelVisibility(True)
view2.SetVertexColorArrayName("vertex id")
view2.SetColorVertices(True)
view2.SetEdgeColorArrayName("centrality")
view2.SetColorEdges(True)
view2.SetLayoutStrategy(fastStrat)
# Demonstrate value based selection on edges
sel = vtkSelectionSource()
sel.SetContentType(7) # Thresholds
sel.SetFieldType(4) # Edge
sel.SetArrayName("centrality")
sel.AddThreshold(500,5000) # High centrality edges
sel.Update()
# Take selection and extract a graph
extract_graph = vtkExtractSelectedGraph()
extract_graph.AddInputConnection(centrality.GetOutputPort())
extract_graph.SetSelectionConnection(sel.GetOutputPort())
# Create a view for the extracted graph
view3 = vtkGraphLayoutView()
view3.AddRepresentationFromInputConnection(extract_graph.GetOutputPort())
view3.SetVertexLabelArrayName("vertex id")
view3.SetVertexLabelVisibility(True)
view3.SetVertexColorArrayName("vertex id")
view3.SetColorVertices(True)
view3.SetEdgeColorArrayName("centrality")
view3.SetColorEdges(True)
view3.SetLayoutStrategyToSimple2D()
# Make sure the views are using a pedigree id selection
view.GetRepresentation(0).SetSelectionType(2)
view2.GetRepresentation(0).SetSelectionType(2)
view3.GetRepresentation(0).SetSelectionType(2)
# Create a selection link and set both view to use it
annotationLink = vtkAnnotationLink()
view.GetRepresentation(0).SetAnnotationLink(annotationLink)
view2.GetRepresentation(0).SetAnnotationLink(annotationLink)
view3.GetRepresentation(0).SetAnnotationLink(annotationLink)
annotationLink.SetCurrentSelection(sel.GetOutput())
updater = vtkViewUpdater()
updater.AddAnnotationLink(annotationLink)
updater.AddView(view)
updater.AddView(view2)
updater.AddView(view3)
# Set the theme on the view
theme = vtkViewTheme.CreateMellowTheme()
theme.SetLineWidth(5)
theme.SetPointSize(10)
theme.SetCellOpacity(.99)
theme.SetSelectedCellColor(1,0,1)
theme.SetSelectedPointColor(1,0,1)
view.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
view3.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
view3.GetRenderWindow().SetSize(600, 600)
view3.ResetCamera()
view3.Render()
view.GetInteractor().Start()
| bsd-3-clause |
christiandev/l10n-brazil | __unported__/l10n_br_account_product/sped/nfe/serializer/txt.py | 2 | 52934 | # -*- encoding: utf-8 -*-
# ##############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import time
from datetime import datetime
from unicodedata import normalize
import pytz
from openerp import SUPERUSER_ID
from openerp.osv import osv
from openerp.tools.translate import _
from openerp import netsvc
import pooler
from openerp.addons.l10n_br_base.tools.misc import punctuation_rm
def nfe_export(cr, uid, ids, nfe_environment='1',
nfe_version='2.00', context=False):
# StrFile = ''
StrNF = 'NOTA FISCAL|%s|\n' % len(ids)
StrFile = StrNF
pool = pooler.get_pool(cr.dbname)
nfes = []
for inv in pool.get('account.invoice').browse(cr, uid, ids, context={'lang': 'pt_BR'}):
#Endereço do company
company_addr = pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
company_addr_default = \
pool.get('res.partner').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
StrA = 'A|%s|%s|\n' % (nfe_version, '')
StrFile += StrA
StrRegB = {
'cUF': company_addr_default.state_id.ibge_code,
'cNF': '',
'NatOp': normalize('NFKD', unicode(inv.cfop_ids[0].small_name or '')).encode('ASCII', 'ignore'),
'indPag': inv.payment_term and inv.payment_term.indPag or '0',
'mod': inv.fiscal_document_id.code,
'serie': inv.document_serie_id.code,
'nNF': inv.internal_number or '',
'hSaiEnt': '',
'tpNF': '',
'cMunFG': ('%s%s') % (
company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code),
'TpImp': '1',
'TpEmis': '1',
'cDV': '',
'tpAmb': nfe_environment,
'finNFe': inv.nfe_purpose,
'procEmi': '0',
# 'VerProc': '2.2.26',
'dhCont': '',
'xJust': '',
}
if inv.cfop_ids[0].type in ("input"):
StrRegB['tpNF'] = '0'
else:
StrRegB['tpNF'] = '1'
if nfe_version == '3.10':
# Capturar a timezone do usuario
user_pool = inv.pool.get('res.users')
user = user_pool.browse(cr, SUPERUSER_ID, uid)
tz = pytz.timezone(user.partner_id.tz) or pytz.utc
StrRegB['dhEmi'] = str(pytz.utc.localize(
datetime.strptime(inv.date_hour_invoice, '%Y-%m-%d %H:%M:%S')).astimezone(tz)).replace(' ', 'T') or ''
StrRegB['dhSaiEnt'] = str(pytz.utc.localize(
datetime.strptime(inv.date_in_out, '%Y-%m-%d %H:%M:%S')).astimezone(tz)).replace(' ', 'T') or ''
StrRegB['idDest'] = inv.fiscal_position.id_dest or ''
StrRegB['indFinal'] = inv.ind_final or ''
StrRegB['indPres'] = inv.ind_pres or ''
StrRegB['VerProc'] = '3.10.18'
#Modificado
StrB = 'B|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegB['cUF'], StrRegB['cNF'], StrRegB['NatOp'], StrRegB['indPag'], StrRegB['mod'],
StrRegB['serie'], StrRegB['nNF'], StrRegB['dhEmi'], StrRegB['dhSaiEnt'], StrRegB['hSaiEnt'],
StrRegB['tpNF'], StrRegB['idDest'], StrRegB['cMunFG'], StrRegB['TpImp'], StrRegB['TpEmis'],
StrRegB['cDV'], StrRegB['tpAmb'], StrRegB['finNFe'], StrRegB['indFinal'], StrRegB['indPres'],
StrRegB['procEmi'], StrRegB['VerProc'], StrRegB['dhCont'], StrRegB['xJust'])
else:
StrRegB['dEmi'] = inv.date_invoice or ''
StrRegB['dSaiEnt'] = str(datetime.strptime(inv.date_in_out, '%Y-%m-%d %H:%M:%S').date()) or ''
StrRegB['VerProc'] = '2.2.26'
StrB = 'B|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegB['cUF'], StrRegB['cNF'], StrRegB['NatOp'], StrRegB['indPag'],
StrRegB['mod'], StrRegB['serie'], StrRegB['nNF'], StrRegB['dEmi'], StrRegB['dSaiEnt'],
StrRegB['hSaiEnt'], StrRegB['tpNF'], StrRegB['cMunFG'], StrRegB['TpImp'], StrRegB['TpEmis'],
StrRegB['cDV'], StrRegB['tpAmb'], StrRegB['finNFe'], StrRegB['procEmi'], StrRegB['VerProc'],
StrRegB['dhCont'], StrRegB['xJust'])
StrFile += StrB
for inv_related in inv.fiscal_document_related_ids:
if inv_related.document_type == 'nf':
StrRegB14 = {
'cUF': '%s' % inv_related.state_id and inv_related.state_id.ibge_code or '',
'AAMM': datetime.strptime(inv_related.date, '%Y-%m-%d').strftime('%y%m') or '',
'CNPJ': punctuation_rm(inv_related.cnpj_cpf),
'Mod': inv_related.fiscal_document_id and inv_related.fiscal_document_id.code or '',
'serie': inv_related.serie or '',
'nNF': punctuation_rm(inv_related.internal_number),
}
StrB14 = 'B14|%s|%s|%s|%s|%s|%s|\n' % (StrRegB14['cUF'],
StrRegB14['AAMM'], StrRegB14['CNPJ'], StrRegB14['CNPJ'],
StrRegB14['serie'], StrRegB14['nNF'])
StrFile += StrB14
elif inv_related.document_type == 'nfrural':
StrRegB20a = {
'cUF': '%s' % inv_related.state_id and inv_related.state_id.ibge_code or '',
'AAMM': datetime.strptime(inv_related.date, '%Y-%m-%d').strftime('%y%m') or '',
'IE': punctuation_rm(inv_related.inscr_est),
'mod': inv_related.fiscal_document_id and inv_related.fiscal_document_id.code or '',
'serie': inv_related.serie or '',
'nNF': punctuation_rm(inv_related.internal_number),
}
StrB20a = 'B20a|%s|%s|%s|%s|%s|%s|\n' % (StrRegB20a['cUF'],
StrRegB20a['AAMM'], StrRegB20a['IE'], StrRegB20a['mod'],
StrRegB20a['serie'], StrRegB20a['nNF'])
StrFile += StrB20a
if inv_related.cpfcnpj_type == 'cnpj':
StrRegB20d = {
'CNPJ': punctuation_rm(inv_related.cnpj_cpf)
}
StrB20d = 'B20d|%s|\n' % StrRegB20d['CNPJ']
StrFile += StrB20d
else:
StrRegB20e = {
'CPF': punctuation_rm(inv_related.cnpj_cpf)
}
StrB20e = 'B20e|%s|\n' % StrRegB20e['CPF']
StrFile += StrB20e
elif inv_related.document_type == 'nfe':
StrRegB13 = {
'refNFe': inv_related.access_key or '',
}
StrB13 = 'B13|%s|\n' % StrRegB13['refNFe']
StrFile += StrB13
elif inv_related.document_type == 'cte':
StrRegB20i = {
'refCTe': inv_related.access_key or '',
}
StrB20i = 'B20i|%s|\n' % StrRegB20i['refCTe']
StrFile += StrB20i
elif inv_related.document_type == 'cf':
StrRegB20j = {
'mod': inv_related.fiscal_document_id and inv_related.fiscal_document_id.code or '',
'nECF': inv_related.internal_number,
'nCOO': inv_related.serie,
}
StrB20j = 'B20j|%s|%s|%s|\n' % (StrRegB20j['mod'], StrRegB20j['nECF'], StrRegB20j['nCOO'])
StrFile += StrB20j
StrRegC = {
'XNome': normalize('NFKD',unicode(inv.company_id.partner_id.legal_name or '')).encode('ASCII','ignore'),
'XFant': normalize('NFKD',unicode(inv.company_id.partner_id.name or '')).encode('ASCII','ignore'),
'IE': punctuation_rm(inv.company_id.partner_id.inscr_est),
'IEST': '',
'IM': punctuation_rm(inv.company_id.partner_id.inscr_mun),
'CNAE': punctuation_rm(inv.company_id.cnae_main_id.code),
'CRT': inv.company_id.fiscal_type or '',
}
#TODO - Verificar, pois quando e informado do CNAE ele exige que a inscricao municipal, parece um bug do emissor da NFE
if not inv.company_id.partner_id.inscr_mun:
StrRegC['CNAE'] = ''
StrC = 'C|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC['XNome'], StrRegC['XFant'], StrRegC['IE'], StrRegC['IEST'],
StrRegC['IM'], StrRegC['CNAE'], StrRegC['CRT'])
StrFile += StrC
if inv.company_id.partner_id.is_company:
StrC02 = 'C02|%s|\n' % (punctuation_rm(inv.company_id.partner_id.cnpj_cpf))
else:
StrC02 = 'C02a|%s|\n' % (punctuation_rm(inv.company_id.partner_id.cnpj_cpf))
StrFile += StrC02
address_company_bc_code = ''
if company_addr_default.country_id.bc_code:
address_company_bc_code = company_addr_default.country_id.bc_code[1:]
StrRegC05 = {
'XLgr': normalize('NFKD', unicode(company_addr_default.street or '')).encode('ASCII', 'ignore'),
'Nro': company_addr_default.number or '',
'Cpl': normalize('NFKD', unicode(company_addr_default.street2 or '')).encode('ASCII', 'ignore'),
'Bairro': normalize('NFKD', unicode(company_addr_default.district or 'Sem Bairro')).encode('ASCII',
'ignore'),
'CMun': '%s%s' % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code),
'XMun': normalize('NFKD', unicode(company_addr_default.l10n_br_city_id.name or '')).encode('ASCII',
'ignore'),
'UF': company_addr_default.state_id.code or '',
'CEP': punctuation_rm(company_addr_default.zip),
'cPais': address_company_bc_code or '',
'xPais': normalize('NFKD', unicode(company_addr_default.country_id.name or '')).encode('ASCII', 'ignore'),
'fone': punctuation_rm(company_addr_default.phone or '').replace(' ', ''),
}
StrC05 = 'C05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegC05['XLgr'], StrRegC05['Nro'], StrRegC05['Cpl'], StrRegC05['Bairro'],
StrRegC05['CMun'], StrRegC05['XMun'], StrRegC05['UF'], StrRegC05['CEP'],
StrRegC05['cPais'], StrRegC05['xPais'], StrRegC05['fone'])
StrFile += StrC05
partner_bc_code = ''
address_invoice_state_code = ''
address_invoice_city = ''
UFEmbarq = ''
XLocEmbarq = ''
partner_cep = ''
if inv.partner_id.country_id.bc_code:
partner_bc_code = inv.partner_id.country_id.bc_code[1:]
if inv.partner_id.country_id.id != company_addr_default.country_id.id:
address_invoice_state_code = 'EX'
address_invoice_city = 'Exterior'
address_invoice_city_code = '9999999'
UFEmbarq = company_addr_default.state_id.code
XLocEmbarq = company_addr_default.city
partner_cep = ''
else:
address_invoice_state_code = inv.partner_id.state_id.code
address_invoice_city = normalize('NFKD',
unicode(inv.partner_id.l10n_br_city_id.name or '')).encode('ASCII','ignore')
address_invoice_city_code = ('%s%s') % (
inv.partner_id.state_id.ibge_code, inv.partner_id.l10n_br_city_id.ibge_code)
partner_cep = punctuation_rm(inv.partner_id.zip)
# Se o ambiente for de teste deve ser escrito na razão do destinatário
if nfe_environment == '2':
xNome = 'NF-E EMITIDA EM AMBIENTE DE HOMOLOGACAO - SEM VALOR FISCAL'
else:
xNome = normalize('NFKD', unicode(inv.partner_id.legal_name or '')).encode('ASCII', 'ignore')
StrRegE = {
'xNome': xNome,
'IE': punctuation_rm(inv.partner_id.inscr_est),
'ISUF': '',
'email': inv.partner_id.email or '',
}
# Adicionado
if nfe_version == '3.10':
StrRegE['indIEDest'] = '9'
StrRegE['IM'] = StrRegC['IM']
StrE = 'E|%s|%s|%s|%s|%s|\n' % (
StrRegE['xNome'], StrRegE['indIEDest'], StrRegE['IE'], StrRegE['ISUF'], StrRegE['email'])
else:
StrE = 'E|%s|%s|%s|%s|\n' % (StrRegE['xNome'], StrRegE['IE'], StrRegE['ISUF'], StrRegE['IM'], StrRegE['email'])
StrFile += StrE
if inv.partner_id.is_company:
StrE0 = 'E02|%s|\n' % (punctuation_rm(inv.partner_id.cnpj_cpf))
else:
StrE0 = 'E03|%s|\n' % (punctuation_rm(inv.partner_id.cnpj_cpf))
StrFile += StrE0
StrRegE05 = {
'xLgr': normalize('NFKD', unicode(inv.partner_id.street or '')).encode('ASCII', 'ignore'),
'nro': normalize('NFKD', unicode(inv.partner_id.number or '')).encode('ASCII', 'ignore'),
'xCpl': punctuation_rm(normalize('NFKD',unicode(inv.partner_id.street2 or '' )).encode('ASCII','ignore')),
'xBairro': normalize('NFKD', unicode(inv.partner_id.district or 'Sem Bairro')).encode('ASCII', 'ignore'),
'cMun': address_invoice_city_code,
'xMun': address_invoice_city,
'UF': address_invoice_state_code,
'CEP': partner_cep,
'cPais': partner_bc_code,
'xPais': normalize('NFKD', unicode(inv.partner_id.country_id.name or '')).encode('ASCII', 'ignore'),
# 'fone': re.sub('[%s]' % re.escape(string.punctuation), '',
# str(inv.partner_id.phone or '').replace(' ', '')),
'fone': punctuation_rm(inv.partner_id.phone or '').replace(' ', ''),
}
StrE05 = 'E05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegE05['xLgr'], StrRegE05['nro'], StrRegE05['xCpl'], StrRegE05['xBairro'],
StrRegE05['cMun'], StrRegE05['xMun'], StrRegE05['UF'], StrRegE05['CEP'],
StrRegE05['cPais'], StrRegE05['xPais'], StrRegE05['fone'],)
StrFile += StrE05
if inv.partner_shipping_id:
if inv.partner_id.id != inv.partner_shipping_id.id:
StrRegG = {
'XLgr': normalize('NFKD', unicode(inv.partner_shipping_id.street or '', )).encode('ASCII',
'ignore'),
'Nro': normalize('NFKD', unicode(inv.partner_shipping_id.number or '')).encode('ASCII', 'ignore'),
'XCpl': punctuation_rm(normalize('NFKD',unicode(inv.partner_shipping_id.street2 or
'' )).encode('ASCII','ignore')),
'XBairro': punctuation_rm(normalize('NFKD',
unicode(inv.partner_shipping_id.district or
'Sem Bairro' )).encode('ASCII','ignore')),
'CMun': ('%s%s') % (
inv.partner_shipping_id.state_id.ibge_code, inv.partner_shipping_id.l10n_br_city_id.ibge_code),
'XMun': normalize('NFKD', unicode(inv.partner_shipping_id.l10n_br_city_id.name or '')).encode(
'ASCII', 'ignore'),
'UF': inv.partner_shipping_id.state_id.code,
}
StrG = 'G|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegG['XLgr'], StrRegG['Nro'], StrRegG['XCpl'], StrRegG['XBairro'], StrRegG['CMun'],
StrRegG['XMun'],
StrRegG['UF'])
StrFile += StrG
if inv.partner_id.is_company:
# StrG0 = 'G02|%s|\n' % (
# re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
StrG0 = 'G02|%s|\n' % punctuation_rm(inv.partner_id.cnpj_cpf)
else:
# StrG0 = 'G02a|%s|\n' % (
# re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
StrG0 = 'G02a|%s|\n' % punctuation_rm(inv.partner_id.cnpj_cpf)
StrFile += StrG0
i = 0
for inv_line in inv.invoice_line:
i += 1
# FIXME
if inv_line.freight_value:
freight_value = str("%.2f" % inv_line.freight_value)
else:
freight_value = ''
if inv_line.insurance_value:
insurance_value = str("%.2f" % inv_line.insurance_value)
else:
insurance_value = ''
if inv_line.other_costs_value:
other_costs_value = str("%.2f" % inv_line.other_costs_value)
else:
other_costs_value = ''
if inv_line.discount_value:
discount_value = str("%.2f" % inv_line.discount_value)
else:
discount_value = ''
StrH = 'H|%s||\n' % (i)
StrFile += StrH
CProd = ''
XProd = ''
if inv_line.product_id.code:
CProd = inv_line.product_id.code
XProd = normalize('NFKD', unicode(inv_line.product_id.name or '')).encode('ASCII', 'ignore')
else:
CProd = unicode(i).strip().rjust(4, u'0')
XProd = normalize('NFKD', unicode(inv_line.name or '')).encode('ASCII', 'ignore')
StrRegI = {
'CProd': CProd,
'CEAN': inv_line.product_id.ean13 or '',
'XProd': XProd,
'EXTIPI': '',
'CFOP': inv_line.cfop_id.code,
'UCom': normalize('NFKD', unicode(inv_line.uos_id.name or '', )).encode('ASCII', 'ignore'),
'QCom': str("%.4f" % inv_line.quantity),
'VUnCom': str("%.7f" % inv_line.price_unit),
'VProd': str("%.2f" % inv_line.price_gross),
'CEANTrib': inv_line.product_id.ean13 or '',
'UTrib': normalize('NFKD', unicode(inv_line.uos_id.name or '', )).encode('ASCII', 'ignore'),
'QTrib': str("%.4f" % inv_line.quantity),
'VUnTrib': str("%.7f" % inv_line.price_unit),
'VFrete': freight_value,
'VSeg': insurance_value,
'VDesc': discount_value,
'vOutro': other_costs_value,
'indTot': '1',
'xPed': '',
'nItemPed': '',
# 'NCM': re.sub('[%s]' % re.escape(string.punctuation),
# '', inv_line.fiscal_classification_id.name or '')
}
StrRegI['NCM'] = punctuation_rm(
inv_line.fiscal_classification_id.name)
if nfe_version == '3.10':
StrRegI['NVE'] = ''
StrRegI['nFCI'] = ''
StrI = 'I|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegI['CProd'], StrRegI['CEAN'], StrRegI['XProd'], StrRegI['NCM'],
StrRegI['NVE'], StrRegI['EXTIPI'], StrRegI['CFOP'], StrRegI['UCom'],
StrRegI['QCom'], StrRegI['VUnCom'], StrRegI['VProd'], StrRegI['CEANTrib'],
StrRegI['UTrib'], StrRegI['QTrib'], StrRegI['VUnTrib'], StrRegI['VFrete'],
StrRegI['VSeg'], StrRegI['VDesc'], StrRegI['vOutro'], StrRegI['indTot'],
StrRegI['xPed'], StrRegI['nItemPed'], StrRegI['nFCI'])
else:
StrI = 'I|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegI['CProd'], StrRegI['CEAN'], StrRegI['XProd'], StrRegI['NCM'],
StrRegI['EXTIPI'], StrRegI['CFOP'], StrRegI['UCom'], StrRegI['QCom'],
StrRegI['VUnCom'], StrRegI['VProd'], StrRegI['CEANTrib'], StrRegI['UTrib'],
StrRegI['QTrib'], StrRegI['VUnTrib'], StrRegI['VFrete'], StrRegI['VSeg'],
StrRegI['VDesc'], StrRegI['vOutro'], StrRegI['indTot'], StrRegI['xPed'],
StrRegI['nItemPed'])
StrFile += StrI
for inv_di in inv_line.import_declaration_ids:
StrRegI18 = {
'NDI': inv_di.name,
'DDI': inv_di.date_registration or '',
'XLocDesemb': inv_di.location,
'UFDesemb': inv_di.state_id.code or '',
'DDesemb': inv_di.date_release or '',
'CExportador': inv_di.exporting_code,
}
StrI18 = 'I18|%s|%s|%s|%s|%s|%s|\n' % (
StrRegI18['NDI'],
StrRegI18['DDI'],
StrRegI18['XLocDesemb'],
StrRegI18['UFDesemb'],
StrRegI18['DDesemb'],
StrRegI18['CExportador'],
)
StrFile += StrI18
for inv_di_line in inv_di.line_ids:
StrRegI25 = {
'NAdicao': inv_di_line.name,
'NSeqAdic': inv_di_line.sequence,
'CFabricante': inv_di_line.manufacturer_code,
'VDescDI': str("%.2f" % inv_di_line.amount_discount),
}
StrI25 = 'I25|%s|%s|%s|%s|\n' % (
StrRegI25['NAdicao'],
StrRegI25['NSeqAdic'],
StrRegI25['CFabricante'],
StrRegI25['VDescDI'],
)
StrFile += StrI25
icms_cst = inv_line.icms_cst_id and inv_line.icms_cst_id.code or ''
ipi_cst = inv_line.ipi_cst_id and inv_line.ipi_cst_id.code or ''
pis_cst = inv_line.pis_cst_id and inv_line.pis_cst_id.code or ''
cofins_cst = inv_line.cofins_cst_id and inv_line.cofins_cst_id.code or ''
StrM = 'M|\n'
StrFile += StrM
StrN = 'N|\n'
StrFile += StrN
#TODO - Fazer alteração para cada tipo de cst ICMS
if inv_line.product_type == 'product':
if icms_cst in ('00',):
StrRegN02 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'ModBC': inv_line.icms_base_type,
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN02 = 'N02|%s|%s|%s|%s|%s|%s|\n' % (
StrRegN02['Orig'], StrRegN02['CST'], StrRegN02['ModBC'], StrRegN02['VBC'], StrRegN02['PICMS'],
StrRegN02['VICMS'])
StrFile += StrN02
if icms_cst in ('10',):
StrRegN03 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'ModBC': inv_line.icms_base_type,
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': inv_line.icms_st_base_type,
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN03 = 'N03|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegN03['Orig'], StrRegN03['CST'], StrRegN03['ModBC'],
StrRegN03['VBC'], StrRegN03['PICMS'], StrRegN03['VICMS'],
StrRegN03['ModBCST'], StrRegN03['PMVAST'],
StrRegN03['PRedBCST'], StrRegN03['VBCST'],
StrRegN03['PICMSST'], StrRegN03['VICMSST'])
StrFile += StrN03
if icms_cst in ('20',):
StrRegN04 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'ModBC': inv_line.icms_base_type,
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN04 = 'N04|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegN04['Orig'], StrRegN04['CST'], StrRegN04['ModBC'],
StrRegN04['PRedBC'], StrRegN04['VBC'], StrRegN04['PICMS'],
StrRegN04['VICMS'])
StrFile += StrN04
if icms_cst in ('40', '50'):
StrRegN06 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'vICMS': str("%.2f" % inv_line.icms_value),
'motDesICMS': '9', # FIXME
}
StrN06 = 'N06|%s|%s|%s|%s|\n' % (
StrRegN06['Orig'], StrRegN06['CST'], StrRegN06['vICMS'],
StrRegN06['motDesICMS'])
StrFile += StrN06
if icms_cst in ('41',):
StrRegN06 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'vICMS': '',
'motDesICMS': '',
}
StrN06 = 'N06|%s|%s|%s|%s|\n' % (
StrRegN06['Orig'], StrRegN06['CST'], StrRegN06['vICMS'],
StrRegN06['motDesICMS'])
StrFile += StrN06
if icms_cst in ('51',):
StrRegN07 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'ModBC': inv_line.icms_base_type,
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN07 = 'N07|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegN07['Orig'], StrRegN07['CST'], StrRegN07['ModBC'],
StrRegN07['PRedBC'], StrRegN07['VBC'],
StrRegN07['PICMS'], StrRegN07['VICMS'])
StrFile += StrN07
if icms_cst in ('60',):
StrRegN08 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'VBCST': str("%.2f" % 0.00),
'VICMSST': str("%.2f" % 0.00),
}
StrN08 = 'N08|%s|%s|%s|%s|\n' % (
StrRegN08['Orig'], StrRegN08['CST'], StrRegN08['VBCST'], StrRegN08['VICMSST'])
StrFile += StrN08
if icms_cst in ('70',):
StrRegN09 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'ModBC': inv_line.icms_base_type,
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': inv_line.icms_st_base_type,
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN09 = 'N09|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegN09['Orig'], StrRegN09['CST'], StrRegN09['ModBC'], StrRegN09['PRedBC'], StrRegN09['VBC'],
StrRegN09['PICMS'], StrRegN09['VICMS'], StrRegN09['ModBCST'], StrRegN09['PMVAST'],
StrRegN09['PRedBCST'], StrRegN09['VBCST'], StrRegN09['PICMSST'], StrRegN09['VICMSST'])
StrFile += StrN09
if icms_cst in ('90',):
StrRegN10 = {
'Orig': inv_line.product_id.origin or '0',
'CST': icms_cst,
'ModBC': inv_line.icms_base_type,
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': inv_line.icms_st_base_type,
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN10 = 'N10|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegN10['Orig'], StrRegN10['CST'], StrRegN10['ModBC'], StrRegN10['PRedBC'], StrRegN10['VBC'],
StrRegN10['PICMS'], StrRegN10['VICMS'], StrRegN10['ModBCST'], StrRegN10['PMVAST'],
StrRegN10['PRedBCST'], StrRegN10['VBCST'], StrRegN10['PICMSST'], StrRegN10['VICMSST'])
StrFile += StrN10
if icms_cst in ('101',):
StrRegN10c = {
'Orig': inv_line.product_id.origin or '0',
'CSOSN': icms_cst,
'pCredSN': str("%.2f" % inv_line.icms_percent),
'vCredICMSSN': str("%.2f" % inv_line.icms_value),
}
StrN10c = 'N10c|%s|%s|%s|%s|\n' % (
StrRegN10c['Orig'], StrRegN10c['CSOSN'], StrRegN10c['pCredSN'], StrRegN10c['vCredICMSSN'])
StrFile += StrN10c
# Incluido CST 102,103 e 300 - Uso no Simples Nacional - Linha original era para CST 400
if icms_cst in ('102', '103', '300', '400'):
StrRegN10d = {
'Orig': inv_line.product_id.origin or '0',
'CSOSN': icms_cst
}
StrN10d = 'N10d|%s|%s|\n' % (StrRegN10d['Orig'], StrRegN10d['CSOSN'])
StrFile += StrN10d
if icms_cst in ('201',):
StrRegN10e = {
'Orig': inv_line.product_id.origin or '0',
'CSOSN': icms_cst,
'ModBCST': inv_line.icms_st_base_type,
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
'pCredSN': str("%.2f" % 0.00), # TODO Obter aliquota ICMS Simples
'vCredICMSSN': str("%.2f" % 0.00), #TODO Calcular Crédito ICMS baseado aliquota anterior.
}
StrN10e = 'N10e|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN10e['Orig'],
StrRegN10e['CSOSN'],
StrRegN10e['ModBCST'],
StrRegN10e['PMVAST'],
StrRegN10e['PRedBCST'],
StrRegN10e['VBCST'],
StrRegN10e['PICMSST'],
StrRegN10e['VICMSST'],
StrRegN10e['pCredSN'],
StrRegN10e['vCredICMSSN'])
StrFile += StrN10e
if icms_cst in ('202', '203'):
StrRegN10f = {
'Orig': inv_line.product_id.origin or '0',
'CSOSN': icms_cst,
'ModBCST': inv_line.icms_st_base_type,
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN10f = 'N10f|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN10f['Orig'],
StrRegN10f['CSOSN'],
StrRegN10f['ModBCST'],
StrRegN10f['PMVAST'],
StrRegN10f['PRedBCST'],
StrRegN10f['VBCST'],
StrRegN10f['PICMSST'],
StrRegN10f['VICMSST'])
StrFile += StrN10f
if icms_cst in ('500',):
StrRegN10g = {
'Orig': inv_line.product_id.origin or '0',
'CSOSN': icms_cst,
'vBCSTRet': '', # Todo - Variavel cf. Faixa faturamento
'vICMSSTRet': '' # Todo - Variavel cf. Faixa faturamento
}
StrN10g = 'N10g|%s|%s|%s|%s|\n' % (
StrRegN10g['Orig'], StrRegN10g['CSOSN'], StrRegN10g['vBCSTRet'], StrRegN10g['vICMSSTRet'])
StrFile += StrN10g
if icms_cst in ('900',):
StrRegN10h = {
'Orig': inv_line.product_id.origin or '0',
'CSOSN': icms_cst,
'modBC': inv_line.icms_base_type,
'vBC': str("%.2f" % 0.00),
'pRedBC': '',
'pICMS': str("%.2f" % 0.00),
'vICMS': str("%.2f" % 0.00),
'modBCST': '',
'pMVAST': '',
'pRedBCST': '',
'vBCST': '',
'pICMSST': '',
'vICMSST': '',
'pCredSN': str("%.2f" % 0.00),
'vCredICMSSN': str("%.2f" % 0.00),
}
StrN10h = 'N10h|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN10h['Orig'],
StrRegN10h['CSOSN'],
StrRegN10h['modBC'],
StrRegN10h['vBC'],
StrRegN10h['pRedBC'],
StrRegN10h['pICMS'],
StrRegN10h['vICMS'],
StrRegN10h['modBCST'],
StrRegN10h['pMVAST'],
StrRegN10h['pRedBCST'],
StrRegN10h['vBCST'],
StrRegN10h['pICMSST'],
StrRegN10h['vICMSST'],
StrRegN10h['pCredSN'],
StrRegN10h['vCredICMSSN'])
StrFile += StrN10h
StrRegO = {
'ClEnq': '',
'CNPJProd': '',
'CSelo': '',
'QSelo': '',
'CEnq': '999',
}
StrO = 'O|%s|%s|%s|%s|%s|\n' % (
StrRegO['ClEnq'], StrRegO['CNPJProd'], StrRegO['CSelo'], StrRegO['QSelo'], StrRegO['CEnq'])
StrFile += StrO
if ipi_cst in ('50', '51', '52') and inv_line.ipi_percent > 0:
StrRegO07 = {
'CST': ipi_cst,
'VIPI': str("%.2f" % inv_line.ipi_value),
}
StrO07 = 'O07|%s|%s|\n' % (StrRegO07['CST'], StrRegO07['VIPI'])
StrFile += StrO07
if inv_line.ipi_type == 'percent' or '':
StrRegO10 = {
'VBC': str("%.2f" % inv_line.ipi_base),
'PIPI': str("%.2f" % inv_line.ipi_percent),
}
StrO1 = 'O10|%s|%s|\n' % (StrRegO10['VBC'], StrRegO10['PIPI'])
if inv_line.ipi_type == 'quantity':
pesol = 0
if inv_line.product_id:
pesol = inv_line.product_id.weight_net
StrRegO11 = {
'QUnid': str("%.4f" % (inv_line.quantity * pesol)),
'VUnid': str("%.4f" % inv_line.ipi_percent),
}
StrO1 = 'O11|%s|%s|\n' % (StrRegO11['QUnid'], StrRegO11['VUnid'])
StrFile += StrO1
if ipi_cst in ('99'):
StrRegO07 = {
'CST': ipi_cst,
'VIPI': str("%.2f" % inv_line.ipi_value),
}
StrO07 = ('O07|%s|%s|\n') % (StrRegO07['CST'], StrRegO07['VIPI'])
StrFile += StrO07
StrRegO10 = {
'VBC': str("%.2f" % inv_line.ipi_base),
'PIPI': str("%.2f" % inv_line.ipi_percent),
}
StrO10 = ('O10|%s|%s|\n') % (StrRegO10['VBC'], StrRegO10['PIPI'])
StrFile += StrO10
if inv_line.ipi_percent == 0 and not ipi_cst in ('99'):
StrO1 = 'O08|%s|\n' % ipi_cst
StrFile += StrO1
StrRegP = {
'VBC': str("%.2f" % inv_line.ii_base),
'VDespAdu': str("%.2f" % inv_line.ii_customhouse_charges),
'VII': str("%.2f" % inv_line.ii_value),
'VIOF': str("%.2f" % inv_line.ii_iof),
}
StrP = ('P|%s|%s|%s|%s|\n') % (StrRegP['VBC'], StrRegP['VDespAdu'], StrRegP['VII'], StrRegP['VIOF'])
StrFile += StrP
if inv_line.product_type == 'service':
StrRegU = {
'VBC': str("%.2f" % inv_line.issqn_base),
'VAliq': str("%.2f" % inv_line.issqn_percent),
'VISSQN': str("%.2f" % inv_line.issqn_value),
'CMunFG': ('%s%s') % (inv.partner_id.state_id.ibge_code, inv.partner_id.l10n_br_city_id.ibge_code),
'CListServ': punctuation_rm(inv_line.service_type_id.code),
'cSitTrib': inv_line.issqn_type
}
StrU = ('U|%s|%s|%s|%s|%s|%s|\n') % (
StrRegU['VBC'],
StrRegU['VAliq'],
StrRegU['VISSQN'],
StrRegU['CMunFG'],
StrRegU['CListServ'],
StrRegU['cSitTrib'])
StrFile += StrU
StrQ = 'Q|\n'
StrFile += StrQ
if pis_cst in ('01', '02') and inv_line.pis_percent > 0:
StrRegQ02 = {
'CST': pis_cst,
'VBC': str("%.2f" % inv_line.pis_base),
'PPIS': str("%.2f" % inv_line.pis_percent),
'VPIS': str("%.2f" % inv_line.pis_value),
}
StrQ02 = ('Q02|%s|%s|%s|%s|\n') % (StrRegQ02['CST'],
StrRegQ02['VBC'],
StrRegQ02['PPIS'],
StrRegQ02['VPIS'])
StrFile += StrQ02
if pis_cst in ('99', '49'):
StrRegQ05 = {
'CST': pis_cst,
'VPIS': str("%.2f" % inv_line.pis_value),
}
StrQ05 = ('Q05|%s|%s|\n') % (StrRegQ05['CST'], StrRegQ05['VPIS'])
StrFile += StrQ05
StrRegQ07 = {
'VBC': str("%.2f" % inv_line.pis_base),
'PPIS': str("%.2f" % inv_line.pis_percent),
}
StrQ07 = ('Q07|%s|%s|\n') % (StrRegQ07['VBC'], StrRegQ07['PPIS'])
StrFile += StrQ07
if inv_line.pis_percent == 0 and not pis_cst in ('99', '49'):
StrQ02 = 'Q04|%s|\n' % pis_cst
StrFile += StrQ02
StrQ = 'S|\n'
StrFile += StrQ
if cofins_cst in ('01', '02') and inv_line.cofins_percent > 0:
StrRegS02 = {
'CST': cofins_cst,
'VBC': str("%.2f" % inv_line.cofins_base),
'PCOFINS': str("%.2f" % inv_line.cofins_percent),
'VCOFINS': str("%.2f" % inv_line.cofins_value),
}
StrS02 = ('S02|%s|%s|%s|%s|\n') % (
StrRegS02['CST'], StrRegS02['VBC'], StrRegS02['PCOFINS'], StrRegS02['VCOFINS'])
StrFile += StrS02
if cofins_cst in ('99', '49'):
StrRegS05 = {
'CST': cofins_cst,
'VCOFINS': str("%.2f" % inv_line.cofins_value),
}
StrS05 = ('S05|%s|%s|\n') % (StrRegS05['CST'], StrRegS05['VCOFINS'])
StrFile += StrS05
StrRegS07 = {
'VBC': str("%.2f" % inv_line.cofins_base),
'PCOFINS': str("%.2f" % inv_line.cofins_percent),
}
StrS07 = ('S07|%s|%s|\n') % (StrRegS07['VBC'], StrRegS07['PCOFINS'])
StrFile += StrS07
if inv_line.cofins_percent == 0 and not cofins_cst in ('99', '49'):
StrS04 = 'S04|%s|\n' % cofins_cst
StrFile += StrS04
StrW = 'W|\n'
StrFile += StrW
StrRegW02 = {
'vBC': str("%.2f" % inv.icms_base),
'vICMS': str("%.2f" % inv.icms_value),
'vBCST': str("%.2f" % inv.icms_st_base),
'vST': str("%.2f" % inv.icms_st_value),
'vProd': str("%.2f" % inv.amount_gross),
'vFrete': str("%.2f" % inv.amount_freight),
'vSeg': str("%.2f" % inv.amount_insurance),
'vDesc': str("%.2f" % inv.amount_discount),
'vII': str("%.2f" % inv.ii_value),
'vIPI': str("%.2f" % inv.ipi_value),
'vPIS': str("%.2f" % inv.pis_value),
'vCOFINS': str("%.2f" % inv.cofins_value),
'vOutro': str("%.2f" % inv.amount_costs),
'vNF': str("%.2f" % inv.amount_total),
}
StrW02 = 'W02|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (
StrRegW02['vBC'], StrRegW02['vICMS'], StrRegW02['vBCST'], StrRegW02['vST'], StrRegW02['vProd'],
StrRegW02['vFrete'], StrRegW02['vSeg'], StrRegW02['vDesc'], StrRegW02['vII'], StrRegW02['vIPI'],
StrRegW02['vPIS'], StrRegW02['vCOFINS'], StrRegW02['vOutro'], StrRegW02['vNF'])
StrFile += StrW02
# Modo do Frete: 0- Por conta do emitente; 1- Por conta do destinatário/remetente; 2- Por conta de terceiros; 9- Sem frete (v2.0)
try:
if not inv.incoterm:
StrRegX0 = '9'
else:
StrRegX0 = inv.incoterm.freight_responsibility
except AttributeError:
StrRegX0 = '9'
StrX = 'X|%s|\n' % (StrRegX0)
StrFile += StrX
StrRegX03 = {
'XNome': '',
'IE': '',
'XEnder': '',
'UF': '',
'XMun': '',
}
StrX0 = ''
try:
StrRegX03['XNome'] = normalize('NFKD', unicode(inv.carrier_name or '')).encode('ASCII', 'ignore')
if inv.carrier_id:
#Endereço da transportadora
carrier_addr = pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default'])
carrier_addr_default = pool.get('res.partner').browse(cr, uid, [carrier_addr['default']])[0]
if inv.carrier_id.partner_id.legal_name:
StrRegX03['XNome'] = normalize('NFKD', unicode(inv.carrier_id.partner_id.legal_name or '')).encode(
'ASCII', 'ignore')
else:
StrRegX03['XNome'] = normalize('NFKD', unicode(inv.carrier_id.partner_id.name or '')).encode(
'ASCII', 'ignore')
StrRegX03['IE'] = inv.carrier_id.partner_id.inscr_est or ''
StrRegX03['XEnder'] = normalize('NFKD', unicode(carrier_addr_default.street or '')).encode('ASCII',
'ignore')
StrRegX03['UF'] = carrier_addr_default.state_id.code or ''
if carrier_addr_default.l10n_br_city_id:
StrRegX03['XMun'] = normalize('NFKD',
unicode(carrier_addr_default.l10n_br_city_id.name or '')).encode(
'ASCII', 'ignore')
if inv.carrier_id.partner_id.is_company:
StrX0 = 'X04|%s|\n' % (punctuation_rm(inv.carrier_id.partner_id.cnpj_cpf))
else:
StrX0 = 'X05|%s|\n' % (punctuation_rm(inv.carrier_id.partner_id.cnpj_cpf))
except AttributeError:
pass
StrX03 = 'X03|%s|%s|%s|%s|%s|\n' % (
StrRegX03['XNome'], StrRegX03['IE'], StrRegX03['XEnder'], StrRegX03['UF'], StrRegX03['XMun'])
StrFile += StrX03
StrFile += StrX0
StrRegX18 = {
'Placa': '',
'UF': '',
'RNTC': '',
}
if inv.vehicle_plate:
try:
StrRegX18['Placa'] = inv.vehicle_plate or ''
StrRegX18['UF'] = inv.vehicle_state_id.code or ''
if inv.vehicle_id:
StrRegX18['RNTC'] = inv.vehicle_id.rntc_code or ''
except AttributeError:
pass
StrX18 = 'X18|%s|%s|%s|\n' % (StrRegX18['Placa'], StrRegX18['UF'], StrRegX18['RNTC'])
StrFile += StrX18
StrRegX26 = {
'QVol': '',
'Esp': '',
'Marca': '',
'NVol': '',
'PesoL': '',
'PesoB': '',
}
if inv.number_of_packages:
StrRegX26['QVol'] = inv.number_of_packages
StrRegX26['Esp'] = 'Volume' #TODO
StrRegX26['Marca']
StrRegX26['NVol']
StrRegX26['PesoL'] = str("%.3f" % inv.weight_net)
StrRegX26['PesoB'] = str("%.3f" % inv.weight)
StrX26 = 'X26|%s|%s|%s|%s|%s|%s|\n' % (
StrRegX26['QVol'], StrRegX26['Esp'], StrRegX26['Marca'], StrRegX26['NVol'], StrRegX26['PesoL'],
StrRegX26['PesoB'])
StrFile += StrX26
if inv.journal_id.revenue_expense:
StrY = 'Y|\n'
StrFile += StrY
for line in inv.move_line_receivable_id:
if inv.type in ('out_invoice', 'in_refund'):
value = line.debit
else:
value = line.credit
StrRegY07 = {
'NDup': line.name,
'DVenc': line.date_maturity or inv.date_due or inv.date_invoice,
'VDup': str("%.2f" % value),
}
StrY07 = 'Y07|%s|%s|%s|\n' % (StrRegY07['NDup'], StrRegY07['DVenc'], StrRegY07['VDup'])
StrFile += StrY07
StrRegZ = {
'InfAdFisco': normalize('NFKD', unicode(inv.fiscal_comment or '')).encode('ASCII', 'ignore'),
'InfCpl': normalize('NFKD', unicode(inv.comment or '')).encode('ASCII', 'ignore'),
}
StrZ = 'Z|%s|%s|\n' % (StrRegZ['InfAdFisco'], StrRegZ['InfCpl'])
StrFile += StrZ
if UFEmbarq != '' or XLocEmbarq != '':
StrRegZA = {
'UFEmbarq': UFEmbarq,
'XLocEmbarq': XLocEmbarq,
}
StrZA = 'ZA|%s|%s|\n' % (StrRegZA['UFEmbarq'], StrRegZA['XLocEmbarq'])
StrFile += StrZA
documents = inv.internal_number
pool.get('account.invoice').write(cr, uid, [inv.id], {'nfe_export_date': datetime.now()})
nfes.append({'key': documents, 'nfe': StrFile,
'message': ''})
return nfes
#return unicode(StrFile.encode('utf-8'), errors='replace')
def nfe_import(cr, ids, nfe_environment='1', context=False):
    """Stub NF-e import handler.

    Real import logic is not implemented yet; always returns a fixed
    test marker string regardless of the arguments supplied.
    """
    marker = 'TESTE Import'
    return marker
| agpl-3.0 |
quake0day/Dodrio | scrape_author.py | 1 | 4074 | import sys # used for taking input from the command line
from bs4 import BeautifulSoup # used for parsing and doing some funky HTML stuff
import urllib # used for grabbing data from URLs
import unicodedata # for converting unicode to ascii
"""
Jon Hurlock's Google Scholar Tools by Jon Hurlock (@jonhurlock)
* is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
* Permissions beyond the scope of this license may be available at http://cs.swan.ac.uk/~csjonhurlock/
--------
The following page scrapes important data from a google scholar's page.
You must provide an URL for an individual scholar, enclosed in quotations marks
To run
--------
python scrape_author.py "some url"
python scrape_author.py help
Output
--------
Publications for Jonathan Hurlock:
Searching Twitter: Separating the Tweet from the Chaff. ==> http://scholar.google.co.uk/citations?view_op=view_citation&hl=en&oe=ASCII&user=pu0mIWgAAAAJ&citation_for_view=pu0mIWgAAAAJ:u5HHmVD_uO8C
Keyword clouds: having very little effect on sensemaking in web search engines ==> http://scholar.google.co.uk/citations?view_op=view_citation&hl=en&oe=ASCII&user=pu0mIWgAAAAJ&citation_for_view=pu0mIWgAAAAJ:u-x6o8ySG0sC
"""
# Command-line entry point: expect exactly one argument, the URL of a
# Google Scholar author profile page (or the word "help" for usage notes).
if __name__ == "__main__":
	if len(sys.argv) != 2:
		# Missing URL (or too many arguments): show an example invocation and stop.
		print "Error: You have not given the script an URL"
		print "Try again, and try running something such as:"
		print "$ python scrape_author.py \"http://scholar.google.co.uk/citations?user=pu0mIWgAAAAJ&hl=en\""
		exit()
	else:
		url_to_scrape = sys.argv[1]
	if sys.argv[1].strip() == "help":
		# The single argument was "help": print usage notes and stop.
		print "You must run the following command"
		print "$ python scrape_author.py \"someurl\""
		print "someurl - has to be surrounded by quotation marks"
		print "it must also be a page which is for a specific author."
		print "contact @jonhurlock on twitter for more information."
		exit()
def scrape_author(url_to_scrape):
	"""Scrape a Google Scholar author profile page.

	Fetches *url_to_scrape*, extracts the author's name from the
	``gsc_prf_in`` div and every publication row from the ``gsc_a_t``
	table, and returns the publications as a list of
	``[title, absolute_link]`` pairs (ASCII-normalised).
	"""
	# Fetch the page; close the handle so the socket is not leaked
	# (the original left it open).
	f = urllib.urlopen(url_to_scrape)
	try:
		html_doc = f.read()
	finally:
		f.close()
	# Parse the raw HTML so we can walk the DOM.
	soup = BeautifulSoup(html_doc)
	# --- Author name: text of the <div id="gsc_prf_in"> element.
	# (Computed for parity with the page; not part of the return value.)
	author_name = ""
	for div in soup.find_all('div'):
		if div.has_attr('id') and div['id'] == 'gsc_prf_in':
			author_name = div.get_text(strip=True)
	if isinstance(author_name, unicode):
		# Fold accented characters down to plain ASCII.
		author_name = (unicodedata.normalize('NFKD', author_name).encode('ascii', 'ignore')).strip()
	# --- Publications: every <td class="gsc_a_t"> in the results table.
	publications = []
	publication_table = None
	for table in soup.find_all('table'):
		if table.has_attr('id') and table['id'] == 'gsc_a_t':
			publication_table = table.find_all('td')
	for data in publication_table:
		if u'gsc_a_t' in data['class']:
			# Paper title text.
			paper_title = data.a.get_text(strip=True)
			if isinstance(paper_title, unicode):
				paper_title = (unicodedata.normalize('NFKD', paper_title).encode('ascii', 'ignore')).strip()
			# The href on the page is relative; prefix the Scholar host.
			paper_link = data.a['href']
			if isinstance(paper_link, unicode):
				paper_link = (unicodedata.normalize('NFKD', paper_link).encode('ascii', 'ignore')).strip()
			paper_link = 'http://scholar.google.co.uk' + paper_link
			publications.append([paper_title, paper_link])
	return publications
| mit |
jetty840/ReplicatorG | skein_engines/skeinforge-50/fabmetheus_utilities/geometry/statements/while.py | 13 | 1165 | """
Polygon path.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def processElementNode(elementNode):
	"""Process the xml element.

	Caches the element's 'condition' attribute (split into evaluator
	words) on elementNode.xmlObject the first time the element is seen,
	then repeatedly runs the current processor function over the
	element's children while the condition evaluates greater than zero.
	"""
	if elementNode.xmlObject is None:
		if 'condition' in elementNode.attributes:
			value = elementNode.attributes['condition']
			elementNode.xmlObject = evaluate.getEvaluatorSplitWords(value)
		else:
			# No condition attribute: cache an empty word list so the
			# element is skipped now and on any later visit.
			elementNode.xmlObject = []
	if len(elementNode.xmlObject) < 1:
		return
	xmlProcessor = elementNode.getXMLProcessor()
	if len(xmlProcessor.functions) < 1:
		# No enclosing function available to run the loop body in.
		return
	function = xmlProcessor.functions[-1]
	while evaluate.getEvaluatedExpressionValueBySplitLine(elementNode, elementNode.xmlObject) > 0:
		function.processChildNodes(elementNode)
| gpl-2.0 |
woelfware/BluMote | test/button_learn.py | 1 | 2538 | #!/usr/bin/env python
# Copyright (C) 2011 Woelfware
from bluetooth import *
import blumote
import cPickle
import os
import sys
import time
class Blumote_Client(blumote.Services):
	"""Bluetooth RFCOMM client for a BluMote IR pod.

	Discovers the pod, connects to it, and drives the 'learn' command that
	captures an IR code from a physical remote control.
	"""
	def __init__(self):
		blumote.Services.__init__(self)
		self.addr = None  # Bluetooth address of the connected pod (set by caller)

	def find_blumote_pods(self, pod_name = None):
		"""Search for the BluMote SDP service; returns the matching service records."""
		if pod_name is None:
			pod_name = self.service["name"]
		print "Searching for \"%s\" service..." % (pod_name)
		return find_service(name = pod_name)

	def connect_to_blumote_pod(self, addr):
		"""Open an RFCOMM connection to the pod on channel 1."""
		self.client_sock = BluetoothSocket(RFCOMM)
		self.client_sock.connect((addr, 1))

	def transport_tx(self, cmd, msg):
		"""Send a command byte followed by its payload to the pod."""
		full_msg = struct.pack("B", cmd)
		full_msg += msg
		self.client_sock.send(full_msg)

	def _learn_unpack_msg(self, msg):
		"""Reassemble a learned IR code from one or more packets.

		Protocol (as used below): byte 0 is ACK (0x06) / NAK (0x15); after the
		ACK come a code length byte, a carrier-frequency byte, then 16-bit
		big-endian timing values. Returns the reassembled payload with the
		leading ACK stripped, or None on NAK.
		"""
		return_msg = [msg]
		pkt_nbr = 0
		print 'pkt %i len %i' % (pkt_nbr, len(msg))
		print 'ack/nak:', hex(ord(msg[0]))
		if len(msg) == 1:
			if ord(msg[0]) == 0x15:
				# NAK: the pod rejected the learn request.
				return
			# Only the ACK arrived; pull the next packet for the header.
			msg = self.client_sock.recv(256)
			return_msg.append(msg)
			pkt_nbr += 1
			print 'pkt %i len %i' % (pkt_nbr, len(msg))
		code_len = ord(msg[1])
		print 'code length:', code_len
		frequency = ord(msg[2])
		print 'carrier frequency:', frequency, 'kHz'
		# Keep receiving until the advertised code length (+2 header bytes) is in.
		while (sum([len(str) for str in return_msg]) < code_len + 2):
			return_msg.append(self.client_sock.recv(256))
		return_msg = ''.join(return_msg)
		# Dump the 16-bit big-endian timing values for debugging.
		for i in xrange(4, len(return_msg), 2):
			print i, ':', int(ord(return_msg[i]) * 256 + ord(return_msg[i + 1]))
		return return_msg[1:]	# strip the ack

	def learn(self):
		"""Ask the pod to capture one IR code and return the unpacked result."""
		self.transport_tx(self.cmd_codes.learn, "")
		msg = self.client_sock.recv(1024)
		return self._learn_unpack_msg(msg)
if __name__ == "__main__":
	bm_remote = Blumote_Client()
	# Keep scanning until a device whose name starts with 'BluMote' is found.
	found = False
	while not found:
		try:
			nearby_devices = discover_devices(lookup_names = True)
		except:
			# Discovery can fail transiently; retry on the next loop pass.
			print 'failed to find a blumote... retrying'
			nearby_devices = ()
		print 'found %d device(s)' % len(nearby_devices)
		for addr, name in nearby_devices:
			if name[:len('BluMote')] == 'BluMote':
				print 'connecting to', addr, name
				bm_remote.connect_to_blumote_pod(addr)
				found = True
				break
	# Interactive learn loop: each captured code is pickled to <button>.pkl.
	done = False
	while not done:
		button_name = raw_input('What button would you like to learn ("done" to quit)? ')
		if button_name in ('done', '"done"'):
			done = True
			continue
		print 'Please push %s on your remote.' % (button_name)
		key_code = bm_remote.learn()
		# Filename is the button name with whitespace removed.
		button = open('%s.pkl' % (''.join(button_name.split())), 'wb')
		cPickle.dump(key_code, button, cPickle.HIGHEST_PROTOCOL)
		button.close()
	bm_remote.client_sock.close()
| gpl-3.0 |
gvalentine/pyqtgraph | examples/GLVolumeItem.py | 28 | 1968 | # -*- coding: utf-8 -*-
"""
Demonstrates GLVolumeItem for displaying volumetric data.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
app = QtGui.QApplication([])  # Qt application must exist before creating widgets
w = gl.GLViewWidget()
w.opts['distance'] = 200  # initial camera distance from the origin
w.show()
w.setWindowTitle('pyqtgraph example: GLVolumeItem')

#b = gl.GLBoxItem()
#w.addItem(b)

g = gl.GLGridItem()
g.scale(10, 10, 1)  # stretch grid cells to 10x10 units in the x/y plane
w.addItem(g)
import numpy as np
## Hydrogen electron probability density
def psi(i, j, k, offset=(50,50,100)):
    """Hydrogen electron probability amplitude sampled at grid index (i, j, k).

    The grid index is shifted by *offset* so the nucleus sits at that index.
    Implements the 3d_z2-like orbital: R(r) * (3*cos(theta)**2 - 1), with
    Bohr-radius parameter a0 = 2 grid units. Works element-wise on arrays
    (used with np.fromfunction).
    """
    dx = i - offset[0]
    dy = j - offset[1]
    dz = k - offset[2]
    r = (dx**2 + dy**2 + dz**2) ** 0.5
    theta = np.arctan2(dz, np.hypot(dx, dy))
    a0 = 2
    # Separate the normalization, radial, and angular factors for clarity.
    norm = (1. / 81.) * 1. / (6. * np.pi) ** 0.5 * (1. / a0) ** (3 / 2)
    radial = (r / a0) ** 2 * np.exp(-r / (3 * a0))
    angular = 3 * np.cos(theta) ** 2 - 1
    return norm * radial * angular
data = np.fromfunction(psi, (100,100,200))  # sample the orbital on a 100x100x200 grid
# Log-scaled squared magnitudes of the positive and negative lobes.
positive = np.log(np.clip(data, 0, data.max())**2)
negative = np.log(np.clip(-data, 0, -data.min())**2)

d2 = np.empty(data.shape + (4,), dtype=np.ubyte)  # RGBA volume for GLVolumeItem
d2[..., 0] = positive * (255./positive.max())  # red channel <- positive lobe
d2[..., 1] = negative * (255./negative.max())  # green channel <- negative lobe
d2[..., 2] = d2[...,1]
d2[..., 3] = d2[..., 0]*0.3 + d2[..., 1]*0.3  # alpha blends both lobes
d2[..., 3] = (d2[..., 3].astype(float) / 255.) **2 * 255  # square to soften low alpha

# Colored marker lines along three volume edges (x=red, y=green, z=blue).
d2[:, 0, 0] = [255,0,0,100]
d2[0, :, 0] = [0,255,0,100]
d2[0, 0, :] = [0,0,255,100]

v = gl.GLVolumeItem(d2)
v.translate(-50,-50,-100)  # center the volume on the grid origin
w.addItem(v)

ax = gl.GLAxisItem()
w.addItem(ax)

## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| mit |
rsj217/tornado--scaffold | tornapro/ghost/app/helper.py | 1 | 1216 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'ghost'
import urlparse
import tornado.web
from tornado.httputil import HTTPServerRequest
from .auth.models import User
class BaseAuthHandler(tornado.web.RequestHandler):
    """Request handler base that resolves the current user from a secure cookie."""

    def get_current_user(self):
        """Return the User for the 'user' cookie, or None (clearing the cookie).

        The cookie value is the user's integer primary key. When absent the
        cookie is cleared defensively and None is returned (unauthenticated).
        """
        cookie = self.get_secure_cookie('user')
        if cookie:
            self._current_user = User.getone(pk=int(cookie))
            return self._current_user
        self.clear_cookie('user')
        return
def extract_params(request):
    """Normalize a Tornado request into (uri, http_method, body, headers).

    Accepts either an HTTPServerRequest or a RequestHandler (whose .request
    is used). Rebuilds an absolute URI, strips WSGI internals from the
    headers, and promotes HTTP_AUTHORIZATION to a standard Authorization
    header.
    """
    if not isinstance(request, HTTPServerRequest):
        # A handler was passed; unwrap its request object.
        request = request.request
    parse_url = urlparse.urlparse(request.uri)
    path, params, query, fragment = parse_url.path, parse_url.params, parse_url.query, parse_url.fragment
    # Reassemble an absolute URI using the request's scheme and host.
    uri = urlparse.urlunparse((request.protocol, request.host, path, params, query, fragment))
    http_method = request.method
    headers = request.headers
    # WSGI file-like objects are not real headers; drop them if present.
    if 'wsgi.input' in headers:
        del headers['wsgi.input']
    if 'wsgi.errors' in headers:
        del headers['wsgi.errors']
    if 'HTTP_AUTHORIZATION' in headers:
        headers['Authorization'] = headers['HTTP_AUTHORIZATION']
    body = request.body
    return uri, http_method, body, headers
mattjmorrison/ReportLab | tests/test_pdfbase_pdfmetrics.py | 5 | 4192 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#test_pdfbase_pdfmetrics_widths
"""
Various tests for PDF metrics.
The main test prints out a PDF documents enabling checking of widths of every
glyph in every standard font. Long!
"""
__version__='''$Id: test_pdfbase_pdfmetrics.py 3288 2008-09-15 11:03:17Z rgbecker $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import unittest
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase import _fontdata
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib import colors
verbose = 0
fontNamesToTest = _fontdata.standardFonts #[0:12] #leaves out Symbol and Dingbats for now
def decoratePage(c, header):
    """Draw the running header and centered page number on canvas *c*."""
    c.setFont('Helvetica-Oblique',10)
    c.drawString(72, 800, header)
    c.drawCentredString(297, 54, 'Page %d' % c.getPageNumber())
def makeWidthTestForAllGlyphs(canv, fontName, outlining=1):
    """New page, then runs down doing all 256 glyphs in the font's encoding.

    For each glyph, draws the glyph repeated 30 times and a red marker at the
    computed string width so width errors are visible as marker misalignment.
    """
    thisFont = pdfmetrics.getFont(fontName)
    encName = thisFont.encName
    canv.setFont('Helvetica-Bold', 12)
    title = 'Glyph Metrics Test for font %s, ascent=%s, descent=%s, encoding=%s' % (fontName, str(thisFont.face.ascent), str(thisFont.face.descent), encName)
    canv.drawString(80, 750, title)
    canv.setFont('Helvetica-Oblique',10)
    canv.drawCentredString(297, 54, 'Page %d' % canv.getPageNumber())
    if outlining:
        # put it in the outline
        canv.bookmarkPage('GlyphWidths:' + fontName)
        canv.addOutlineEntry(fontName,'GlyphWidths:' + fontName, level=1)
    y = 720
    widths = thisFont.widths
    glyphNames = thisFont.encoding.vector
    # need to get the right list of names for the font in question
    for i in range(256):
        if y < 72:
            # Ran off the bottom of the page; start a fresh decorated page.
            canv.showPage()
            decoratePage(canv, title)
            y = 750
        glyphName = glyphNames[i]
        if glyphName is not None:
            canv.setFont('Helvetica', 10)
            # Decode the byte via the font's encoding, repeat for visibility.
            text = unicode(chr(i),encName).encode('utf8')*30
            try:
                w = canv.stringWidth(text, fontName, 10)
                canv.drawString(80, y, '%03d %s w=%3d' % (i, glyphName, int((w/3.)*10)))
                canv.setFont(fontName, 10)
                canv.drawString(200, y, text)
                # now work out width and put a red marker next to the end.
                canv.setFillColor(colors.red)
                canv.rect(200 + w, y-1, 5, 10, stroke=0, fill=1)
                canv.setFillColor(colors.black)
            except KeyError:
                canv.drawString(200, y, 'Could not find glyph named "%s"' % glyphName)
        y = y - 12
def makeTestDoc(fontNames):
filename = outputfile('test_pdfbase_pdfmetrics.pdf')
c = Canvas(filename)
c.bookmarkPage('Glyph Width Tests')
c.showOutline()
c.addOutlineEntry('Glyph Width Tests', 'Glyph Width Tests', level=0)
if verbose:
print # get it on a different line to the unittest log output.
for fontName in fontNames:
if verbose:
print 'width test for', fontName
makeWidthTestForAllGlyphs(c, fontName)
c.showPage()
c.save()
if verbose:
if verbose:
print 'saved',filename
class PDFMetricsTestCase(unittest.TestCase):
    """Test various encodings used in PDF files."""

    def test0(self):
        """Visual test for correct glyph widths."""
        makeTestDoc(fontNamesToTest)
def makeSuite():
    """Return the unittest suite for this module's test cases."""
    return makeSuiteForClasses(PDFMetricsTestCase)
#noruntests
if __name__=='__main__':
    usage = """Usage:
(1) test_pdfbase_pdfmetrics.py - makes doc for all standard fonts
(2) test_pdfbase_pdfmetrics.py fontname - " " for just one font."""
    import sys
    verbose = 1
    # accept font names as arguments; otherwise it does the lot
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            if not arg in fontNamesToTest:
                # Unknown font name: show usage and bail out.
                print 'unknown font %s' % arg
                print usage
                sys.exit(0)
        fontNamesToTest = sys.argv[1:]
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| bsd-3-clause |
stephane-martin/salt-debian-packaging | salt-2016.3.2/salt/utils/preseed.py | 2 | 2635 | # -*- coding: utf-8 -*-
'''
Utilities for managing Debian preseed
.. versionadded:: Beryllium
'''
from __future__ import absolute_import
import yaml
import shlex
import salt.utils
def mksls(src, dst=None):
    '''
    Convert a preseed file to an SLS file.

    src -- path of the preseed file to parse.
    dst -- optional output path; when given the rendered YAML is written
           there, otherwise the YAML string is returned.
    '''
    # Parse "owner question/path type [argument]" lines into a nested dict:
    # ps_opts[owner][component]...{'type': ..., 'argument': ...}
    ps_opts = {}
    with salt.utils.fopen(src, 'r') as fh_:
        for line in fh_:
            if line.startswith('#'):
                # Comment line.
                continue
            if not line.strip():
                # Blank line.
                continue
            comps = shlex.split(line)
            if comps[0] not in ps_opts:
                ps_opts[comps[0]] = {}
            cmds = comps[1].split('/')
            pointer = ps_opts[comps[0]]
            for cmd in cmds:
                pointer = pointer.setdefault(cmd, {})
            pointer['type'] = comps[2]
            if len(comps) > 3:
                pointer['argument'] = comps[3]

    sls = {}

    # Set language
    # ( This looks like it maps to something else )
    sls[ps_opts['d-i']['languagechooser']['language-name-fb']['argument']] = {
        'locale': ['system']
    }

    # Set keyboard
    # ( This looks like it maps to something else )
    sls[ps_opts['d-i']['kbd-chooser']['method']['argument']] = {
        'keyboard': ['system']
    }

    # Set timezone
    timezone = ps_opts['d-i']['time']['zone']['argument']
    sls[timezone] = {'timezone': ['system']}
    if ps_opts['d-i']['tzconfig']['gmt']['argument'] == 'true':
        sls[timezone]['timezone'].append('utc')

    # Set network
    if 'netcfg' in ps_opts['d-i']:
        iface = ps_opts['d-i']['netcfg']['choose_interface']['argument']
        sls[iface] = {}
        sls[iface]['enabled'] = True
        # Bug fix: the preseed value lives under the 'argument' key. The old
        # code compared the whole option dict against a string, which was
        # always False, so 'proto' was never set.
        if ps_opts['d-i']['netcfg']['confirm_static']['argument'] == 'true':
            sls[iface]['proto'] = 'static'
        elif ps_opts['d-i']['netcfg']['disable_dhcp']['argument'] == 'false':
            sls[iface]['proto'] = 'dhcp'
        sls[iface]['netmask'] = ps_opts['d-i']['netcfg']['get_netmask']['argument']
        sls[iface]['domain'] = ps_opts['d-i']['netcfg']['get_domain']['argument']
        sls[iface]['gateway'] = ps_opts['d-i']['netcfg']['get_gateway']['argument']
        sls[iface]['hostname'] = ps_opts['d-i']['netcfg']['get_hostname']['argument']
        sls[iface]['ipaddress'] = ps_opts['d-i']['netcfg']['get_ipaddress']['argument']
        sls[iface]['nameservers'] = ps_opts['d-i']['netcfg']['get_nameservers']['argument']

    if dst is not None:
        with salt.utils.fopen(dst, 'w') as fh_:
            fh_.write(yaml.safe_dump(sls, default_flow_style=False))
    else:
        return yaml.safe_dump(sls, default_flow_style=False)
| apache-2.0 |
ntuecon/server | pyenv/Lib/site-packages/win32comext/shell/demos/browse_for_folder.py | 4 | 1701 | # A couple of samples using SHBrowseForFolder
import sys, os
from win32com.shell import shell, shellcon
import win32gui
# A callback procedure - called by SHBrowseForFolder
# A callback procedure - called by SHBrowseForFolder
def BrowseCallbackProc(hwnd, msg, lp, data):
    """Dialog callback: preselect *data* on init; show the path as status text
    whenever the selection changes."""
    if msg== shellcon.BFFM_INITIALIZED:
        # 'data' is the initial path passed to SHBrowseForFolder.
        win32gui.SendMessage(hwnd, shellcon.BFFM_SETSELECTION, 1, data)
    elif msg == shellcon.BFFM_SELCHANGED:
        # Set the status text of the
        # For this message, 'lp' is the address of the PIDL.
        pidl = shell.AddressAsPIDL(lp)
        try:
            path = shell.SHGetPathFromIDList(pidl)
            win32gui.SendMessage(hwnd, shellcon.BFFM_SETSTATUSTEXT, 0, path)
        except shell.error:
            # No path for this PIDL
            pass
if __name__=='__main__':
    # Demonstrate a dialog with the cwd selected as the default - this
    # must be done via a callback function.
    flags = shellcon.BIF_STATUSTEXT
    shell.SHBrowseForFolder(0, # parent HWND
                            None, # root PIDL.
                            "Default of %s" % os.getcwd(), # title
                            flags, # flags
                            BrowseCallbackProc, # callback function
                            os.getcwd() # 'data' param for the callback
                            )
    # Browse from this directory down only.
    # Get the PIDL for the cwd.
    desktop = shell.SHGetDesktopFolder()
    cb, pidl, extra = desktop.ParseDisplayName(0, None, os.getcwd())
    shell.SHBrowseForFolder(0, # parent HWND
                            pidl, # root PIDL.
                            "From %s down only" % os.getcwd(), # title
                            )
| bsd-3-clause |
KawashiroNitori/Anubis | anubis/handler/base.py | 1 | 16407 | import base64
import accept
import asyncio
import calendar
import functools
import hmac
import logging
import markupsafe
import pytz
import sockjs
import time
import traceback
from aiohttp import web
from email import utils
from anubis import app
from anubis import error
from anubis import template
from anubis.model import token
from anubis.model import builtin
from anubis.model import user
from anubis.model import domain
from anubis.model import opcount
from anubis.model.adaptor import setting
from anubis.service import mailer
from anubis.util import cipher
from anubis.util import json
from anubis.util import locale
from anubis.util import options
_logger = logging.getLogger(__name__)
class HandlerBase(setting.SettingMixin):
    """Shared per-request context for HTTP handlers and sockjs connections.

    Resolves the session, user, domain and per-domain user record, and
    exposes permission/privilege checks plus small template helpers.
    """
    NAME = None   # page name, used by templates
    TITLE = None  # page title, used by templates

    async def prepare(self):
        """Populate session/user/domain context for this request.

        Raises UserNotFoundError for a stale session uid and
        DomainNotFoundError for an unknown domain; enforces PERM_VIEW unless
        the user holds PRIV_VIEW_ALL_DOMAIN.
        """
        self.session = await self.update_session()
        self.domain_id = self.request.match_info.pop('domain_id', builtin.DOMAIN_ID_SYSTEM)
        if 'uid' in self.session:
            uid = self.session['uid']
            # Fetch user, domain and domain-user concurrently.
            self.user, self.domain, self.domain_user = await asyncio.gather(
                user.get_by_uid(uid), domain.get(self.domain_id), domain.get_user(self.domain_id, uid)
            )
            if not self.user:
                raise error.UserNotFoundError(uid)
            if not self.domain_user:
                self.domain_user = {}
            # Fix: removed a redundant second `user.get_by_uid` round trip
            # here -- self.user is already fetched above and the guest
            # fallback was unreachable because a missing user raises.
        else:
            self.user = builtin.USER_GUEST
            self.domain = await domain.get(self.domain_id)
            self.domain_user = {}
        if not self.domain:
            raise error.DomainNotFoundError(self.domain_id)
        self.view_lang = self.get_setting('view_lang')
        # TODO: UnknownTimeZoneError
        self.timezone = pytz.timezone(self.get_setting('timezone'))
        self.translate = locale.get_translate(self.view_lang)
        # Bind template helpers to this request's timezone/domain.
        self.datetime_span = functools.partial(_datetime_span, timezone=self.timezone)
        self.datetime_stamp = _datetime_stamp
        self.reverse_url = functools.partial(_reverse_url, domain_id=self.domain_id)
        self.build_path = functools.partial(_build_path, domain_id=self.domain_id,
                                            domain_name=self.domain['name'])
        if not self.has_priv(builtin.PRIV_VIEW_ALL_DOMAIN):
            self.check_perm(builtin.PERM_VIEW)

    def has_perm(self, perm):
        """True if the current user holds *perm* in this domain.

        Domain owners and PRIV_MANAGE_ALL_DOMAIN holders pass implicitly.
        """
        role = self.domain_user.get('role', builtin.ROLE_GUEST)
        mask = self.domain['roles'].get(role, builtin.PERM_NONE)
        return ((perm & mask) == perm
                or self.domain['owner_uid'] == self.user['_id']
                or self.has_priv(builtin.PRIV_MANAGE_ALL_DOMAIN))

    def check_perm(self, perm):
        """Raise PermissionError unless the current user holds *perm*."""
        if not self.has_perm(perm):
            raise error.PermissionError(perm)

    def has_priv(self, priv):
        """True if the current user's global privilege mask includes *priv*."""
        return (priv & self.user['priv']) == priv

    def check_priv(self, priv):
        """Raise PrivilegeError unless the current user holds *priv*."""
        if not self.has_priv(priv):
            raise error.PrivilegeError(priv)

    def dudoc_has_perm(self, udoc, dudoc, perm):
        """True if another user (udoc + domain-user dudoc) holds *perm* here."""
        if not udoc or not dudoc:
            return False
        # TODO(iceboy): Fix caller when dudoc=None is passed in.
        role = dudoc.get('role', builtin.ROLE_DEFAULT)
        mask = self.domain['roles'].get(role, builtin.PERM_NONE)
        return ((perm & mask) == perm
                or self.domain['owner_uid'] == udoc['_id']
                or self.udoc_has_priv(udoc, builtin.PRIV_MANAGE_ALL_DOMAIN))

    def udoc_has_priv(self, udoc, priv):
        """True if another user document's privilege mask includes *priv*."""
        # TODO: Fix caller when udoc=None is passed in.
        return (priv & udoc['priv']) == priv

    def own(self, doc, perm=builtin.PERM_NONE, field='owner_uid', priv=builtin.PRIV_NONE):
        """True if the current user owns *doc* (by *field*) and also holds
        the optional extra *perm* and *priv*."""
        return (doc[field] == self.user['_id']) and self.has_perm(perm) and self.has_priv(priv)

    async def update_session(self, *, new_saved=False, **kwargs):
        """Update or create session if necessary.

        If 'sid' in cookie, the 'expire_at' field is updated.
        If 'sid' not in cookie, only create when there is extra data.

        Args:
            new_saved: use saved session on creation.
            kwargs: extra data.

        Returns:
            The session document.
        """
        (sid, save), session = map(self.request.cookies.get, ['sid', 'save']), None
        if not sid or new_saved:
            save = new_saved
        # Saved ("remember me") sessions live longer than unsaved ones.
        if save:
            token_type = token.TYPE_SAVED_SESSION
            session_expire_seconds = options.options.saved_session_expire_seconds
        else:
            token_type = token.TYPE_UNSAVED_SESSION
            session_expire_seconds = options.options.unsaved_session_expire_seconds
        if sid:
            session = await token.update(sid, token_type, session_expire_seconds,
                                         **{
                                             **kwargs,
                                             'update_ip': self.remote_ip,
                                             'update_ua': self.request.headers.get('User-Agent')
                                         })
        if not session:
            # No valid existing session; mint a new one.
            sid, session = await token.add(token_type, session_expire_seconds,
                                           **{
                                               **kwargs,
                                               'create_ip': self.remote_ip,
                                               'update_ua': self.request.headers.get('User-Agent')
                                           })
        if session:
            cookie_kwargs = {
                'domain': options.options.cookie_domain,
                'secure': options.options.cookie_secure,
                'httponly': True
            }
            if save:
                # Persistent cookie matching the session's expiry.
                timestamp = calendar.timegm(session['expire_at'].utctimetuple())
                cookie_kwargs['expires'] = utils.formatdate(timestamp, usegmt=True)
                cookie_kwargs['max_age'] = session_expire_seconds
                self.response.set_cookie('save', '1', **cookie_kwargs)
            self.response.set_cookie('sid', sid, **cookie_kwargs)
        else:
            self.clear_cookies('sid', 'save')
        return session or {}

    async def delete_session(self):
        """Delete the current session token (logout) and clear its cookies."""
        sid, save = map(self.request.cookies.get, ['sid', 'save'])
        if sid:
            if save:
                token_type = token.TYPE_SAVED_SESSION
            else:
                token_type = token.TYPE_UNSAVED_SESSION
            await token.delete(sid, token_type)
        self.clear_cookies('sid', 'save')

    def clear_cookies(self, *names):
        """Expire each named cookie that is present on the request."""
        for name in names:
            if name in self.request.cookies:
                self.response.set_cookie(name, '',
                                         expires=utils.formatdate(0, usegmt=True),
                                         domain=options.options.cookie_domain,
                                         secure=options.options.cookie_secure,
                                         httponly=True)

    @property
    def remote_ip(self):
        """Client IP, honoring the configured proxy header when set."""
        if options.options.ip_header:
            return self.request.headers.get(options.options.ip_header)
        else:
            return self.request.transport.get_extra_info('peername')[0]

    @property
    def csrf_token(self):
        """CSRF token derived from the session id; empty when no session."""
        if self.session:
            return _get_csrf_token(self.session['_id'])
        else:
            return ''

    def render_html(self, template_name, **kwargs):
        """Render *template_name* with the standard per-request context."""
        kwargs['handler'] = self
        kwargs['_'] = self.translate
        kwargs['domain_id'] = self.domain_id
        if 'page_name' not in kwargs:
            kwargs['page_name'] = self.NAME
        if 'page_title' not in kwargs:
            kwargs['page_title'] = self.translate(self.TITLE)
        if 'path_components' not in kwargs:
            kwargs['path_components'] = self.build_path((self.translate(self.NAME), None))
        kwargs['reverse_url'] = self.reverse_url
        kwargs['datetime_span'] = self.datetime_span
        return template.Environment().get_template(template_name).render(kwargs)
class Handler(web.View, HandlerBase):
    """aiohttp view with session/user/domain context and error rendering."""

    async def _iter(self):
        """Per-request entry point: prepare context, dispatch, map errors.

        UserFacingError becomes an error page or JSON body; any other
        exception becomes a 500 page carrying an encrypted diagnostic blob
        (re-raised instead when running in debug mode).
        """
        try:
            self.response = web.Response()
            await HandlerBase.prepare(self)
            await super(Handler, self)._iter()
        except error.UserFacingError as e:
            _logger.warning('User facing error: %s', repr(e))
            self.response.set_status(e.http_status, None)
            if self.prefer_json:
                self.response.content_type = 'application/json'
                self.response.text = json.encode({'error': e.to_dict()})
            else:
                self.render(e.template_name, error=e,
                            page_name='error', page_title=self.translate('error'),
                            path_components=self.build_path(self.translate('error'), None))
        except Exception as e:
            _logger.error('Unexpected exception occurred when handling %s (IP = %s, UID = %d): %s',
                          self.url, self.remote_ip, self.user['_id'] or None, repr(e))
            if options.options.debug:
                raise
            # Pack request details + traceback into an encrypted blob shown
            # on the 500 page so operators can decrypt and diagnose.
            body = await self.request.read()
            error_info = dict(
                url=self.url,
                method=self.request.method,
                remote_ip=self.remote_ip,
                uid=self.user['_id'],
                time=int(time.time()),
                headers=list(self.request.headers.items()),
                body=base64.b64encode(body).decode('utf8'),
                exc_stack=traceback.format_exc(),
            )
            error_json = json.encode(error_info)
            error_message = cipher.encrypt(error_json.encode('utf8'))
            self.render('500.html', error_message=error_message)
        return self.response

    def render(self, template_name, **kwargs):
        """Render an HTML template into the response body."""
        self.response.content_type = 'text/html'
        self.response.text = self.render_html(template_name, **kwargs)

    def json(self, obj):
        """Serialize *obj* as a non-cacheable JSON response."""
        self.response.content_type = 'application/json'
        self.response.headers.add('Cache-Control', 'no-store, no-cache, must-revalidate')
        self.response.text = json.encode(obj)

    async def binary(self, data, type='application/octet-stream', *, filename: str=None):
        """Stream raw bytes, optionally as a named attachment download."""
        self.response = web.StreamResponse()
        self.response.content_length = len(data)
        self.response.content_type = type
        if filename:
            self.response.headers['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
        await self.response.prepare(self.request)
        await self.response.write(data)

    async def send_mail(self, mail, title, template_name, **kwargs):
        """Render *template_name* and send it as a translated-title email."""
        content = self.render_html(template_name, url_prefix=options.options.url_prefix, **kwargs)
        await mailer.send_mail(mail, '{0} - SUT Online Judge'.format(self.translate(title)), content)

    @property
    def prefer_json(self):
        """True when the Accept header ranks application/json above text/html."""
        for d in accept.parse(self.request.headers.get('Accept')):
            if d.media_type == 'application/json':
                return True
            elif d.media_type == 'text/html' or d.all_types:
                return False
        return False

    @property
    def url(self):
        # Request path without scheme/host.
        return self.request.path

    @property
    def referer_or_main(self):
        # Fall back to the domain main page when no Referer was sent.
        return self.request.headers.get('referer') or self.reverse_url('domain_main')

    def redirect(self, redirect_url):
        """Issue an HTTP 302 redirect to *redirect_url*."""
        self.response.set_status(web.HTTPFound.status_code, None)
        self.response.headers['Location'] = redirect_url

    def json_or_redirect(self, redirect_url, **kwargs):
        """JSON body for API clients, 302 redirect for browsers."""
        if self.prefer_json:
            self.json(kwargs)
        else:
            self.redirect(redirect_url)

    def json_or_render(self, template_name, **kwargs):
        """JSON body for API clients, rendered template for browsers."""
        if self.prefer_json:
            self.json(kwargs)
        else:
            self.render(template_name, **kwargs)

    @property
    def ui_context(self):
        # Values the front-end JavaScript needs on every page.
        return {'csrf_token': self.csrf_token,
                'cdn_prefix': options.options.cdn_prefix,
                'url_prefix': options.options.url_prefix}

    @property
    def user_context(self):
        # Current viewer identity exposed to the front end.
        return {'uid': self.user['_id'],
                'domain': self.domain_id}
class OperationHandler(Handler):
    """Handler that dispatches POST requests on their 'operation' form field.

    A request with operation=foo is routed to self.post_foo(**other_fields);
    an unknown operation raises InvalidOperationError.
    """

    async def post(self):
        form = (await self.request.post()).copy()
        operation = form.pop('operation')
        handler_name = 'post_' + operation
        if not hasattr(self, handler_name):
            raise error.InvalidOperationError(operation)
        await getattr(self, handler_name)(**form)
class Connection(sockjs.Session, HandlerBase):
    """sockjs session that shares HandlerBase's request context helpers."""

    def __init__(self, request, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        # Keep the originating HTTP request so HandlerBase helpers work.
        self.request = request
        self.response = web.Response()

    async def on_open(self):
        """Hook invoked when the connection is established; override in subclasses."""
        pass

    async def on_message(self, **kwargs):
        """Hook invoked per decoded incoming message; override in subclasses."""
        pass

    async def on_close(self):
        """Hook invoked when the connection closes; override in subclasses."""
        pass

    def send(self, **kwargs):
        # JSON-encode keyword arguments as the outgoing message payload.
        super(Connection, self).send(json.encode(kwargs))
@functools.lru_cache()
def _get_csrf_token(session_id_binary):
return hmac.new(b'csrf_token', session_id_binary.encode(), 'sha256').hexdigest()
@functools.lru_cache()
def _reverse_url(name, *, domain_id, **kwargs):
    """Build the URL for route *name*, switching to the domain-scoped route
    when *domain_id* is not the system domain (memoized)."""
    if domain_id != builtin.DOMAIN_ID_SYSTEM:
        # Non-system domains use the '<name>_with_domain_id' route variant.
        name += '_with_domain_id'
        kwargs['domain_id'] = domain_id
    # url_for requires string parts.
    for k, v in kwargs.items():
        kwargs[k] = str(v)
    if kwargs:
        return str(app.Application().router[name].url_for(**kwargs))
    else:
        return str(app.Application().router[name].url_for())
@functools.lru_cache()
def _build_path(*args, domain_id, domain_name):
    """Prepend the domain's breadcrumb (name, main URL) to the path components (memoized)."""
    home_crumb = (domain_name, _reverse_url('domain_main', domain_id=domain_id))
    return [home_crumb, *args]
@functools.lru_cache()
def _datetime_span(dt, relative=True, format='%Y-%m-%d %H:%M:%S', timezone=pytz.utc):
    """Render *dt* as a safe HTML <span> carrying its UTC timestamp, formatted
    in *timezone* (memoized). Naive datetimes are treated as UTC."""
    if not dt.tzinfo:
        dt = dt.replace(tzinfo=pytz.utc)
    return markupsafe.Markup(
        '<span class="time{0}" data-timestamp="{1}">{2}</span>'.format(
            ' relative' if relative else '',
            calendar.timegm(dt.utctimetuple()),
            dt.astimezone(timezone).strftime(format)
        )
    )
@functools.lru_cache()
def _datetime_stamp(dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=pytz.utc)
return calendar.timegm(dt.utctimetuple())
# Decorators
def require_perm(perm):
    """Decorator factory: check the domain permission *perm* before calling
    the wrapped handler method."""
    def decorate(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            self.check_perm(perm)
            return func(self, *args, **kwargs)
        return wrapper
    return decorate
def require_priv(priv):
    """Decorator factory: check the global privilege *priv* before calling
    the wrapped handler method."""
    def decorate(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            self.check_priv(priv)
            return func(self, *args, **kwargs)
        return wrapper
    return decorate
def require_csrf_token(func):
    """Decorator: pop 'csrf_token' from kwargs and reject the request when it
    does not match the session-derived token."""
    @functools.wraps(func)
    def wrapped(self, *args, **kwargs):
        if self.csrf_token and self.csrf_token != kwargs.pop('csrf_token', ''):
            raise error.CsrfTokenError()
        return func(self, *args, **kwargs)
    return wrapped
def route_argument(func):
    """Decorator: merge URL path (route match) parameters into the handler
    call's keyword arguments."""
    @functools.wraps(func)
    def wrapped(self, **kwargs):
        return func(self, **kwargs, **self.request.match_info)
    return wrapped
def get_argument(func):
    """Decorator: merge URL query-string parameters into the handler call's
    keyword arguments."""
    @functools.wraps(func)
    def wrapped(self, **kwargs):
        return func(self, **kwargs, **self.request.query)
    return wrapped
def post_argument(func):
    """Decorator: merge POST form fields into the handler call's keyword
    arguments (reads the request body, hence async)."""
    @functools.wraps(func)
    async def wrapped(self, **kwargs):
        data = dict(await self.request.post())
        return await func(self, **kwargs, **data)
    return wrapped
def limit_rate(op, period_secs, max_operations):
    """Decorator factory: rate-limit the wrapped handler to *max_operations*
    per *period_secs* per client IP, counted under operation name *op*."""
    def decorate(func):
        @functools.wraps(func)
        async def wrapped(self, **kwargs):
            # opcount.inc raises when the per-IP quota is exceeded.
            await opcount.inc(op, opcount.PREFIX_IP + str(self.remote_ip), period_secs, max_operations)
            return await func(self, **kwargs)
        return wrapped
    return decorate
def sanitize(func):
    """Decorator: coerce keyword arguments through the wrapped function's
    type annotations (e.g. x: int turns x='3' into 3); unannotated
    parameters pass through unchanged."""
    @functools.wraps(func)
    def wrapped(self, **kwargs):
        annotations = func.__annotations__
        for name, raw in kwargs.items():
            try:
                converter = annotations[name]
            except KeyError:
                # No annotation for this parameter: leave the value as-is.
                continue
            kwargs[name] = converter(raw)
        return func(self, **kwargs)
    return wrapped
| gpl-3.0 |
3dcauldron/repo-to-rename-2 | snippets/migrations/0001_initial.py | 3 | 10626 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-09 18:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Snippet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, default='', max_length=100)),
('code', models.TextField()),
('linenos', models.BooleanField(default=False)),
('language', models.CharField(choices=[('abap', 'ABAP'), ('ada', 'Ada'), ('agda', 'Agda'), ('ahk', 'autohotkey'), ('alloy', 'Alloy'), ('antlr', 'ANTLR'), ('antlr-as', 'ANTLR With ActionScript Target'), ('antlr-cpp', 'ANTLR With CPP Target'), ('antlr-csharp', 'ANTLR With C# Target'), ('antlr-java', 'ANTLR With Java Target'), ('antlr-objc', 'ANTLR With ObjectiveC Target'), ('antlr-perl', 'ANTLR With Perl Target'), ('antlr-python', 'ANTLR With Python Target'), ('antlr-ruby', 'ANTLR With Ruby Target'), ('apacheconf', 'ApacheConf'), ('apl', 'APL'), ('applescript', 'AppleScript'), ('as', 'ActionScript'), ('as3', 'ActionScript 3'), ('aspectj', 'AspectJ'), ('aspx-cs', 'aspx-cs'), ('aspx-vb', 'aspx-vb'), ('asy', 'Asymptote'), ('at', 'AmbientTalk'), ('autoit', 'AutoIt'), ('awk', 'Awk'), ('basemake', 'Base Makefile'), ('bash', 'Bash'), ('bat', 'Batchfile'), ('bbcode', 'BBCode'), ('befunge', 'Befunge'), ('blitzbasic', 'BlitzBasic'), ('blitzmax', 'BlitzMax'), ('boo', 'Boo'), ('brainfuck', 'Brainfuck'), ('bro', 'Bro'), ('bugs', 'BUGS'), ('c', 'C'), ('c-objdump', 'c-objdump'), ('ca65', 'ca65 assembler'), ('cbmbas', 'CBM BASIC V2'), ('ceylon', 'Ceylon'), ('cfc', 'Coldfusion CFC'), ('cfengine3', 'CFEngine3'), ('cfm', 'Coldfusion HTML'), ('cfs', 'cfstatement'), ('chai', 'ChaiScript'), ('chapel', 'Chapel'), ('cheetah', 'Cheetah'), ('cirru', 'Cirru'), ('clay', 'Clay'), ('clojure', 'Clojure'), ('clojurescript', 'ClojureScript'), ('cmake', 'CMake'), ('cobol', 'COBOL'), ('cobolfree', 'COBOLFree'), ('coffee-script', 'CoffeeScript'), ('common-lisp', 'Common Lisp'), ('console', 'Bash Session'), ('control', 'Debian Control file'), ('coq', 'Coq'), ('cpp', 'C++'), ('cpp-objdump', 'cpp-objdump'), ('croc', 'Croc'), ('cryptol', 'Cryptol'), ('csharp', 'C#'), ('css', 'CSS'), ('css+django', 'CSS+Django/Jinja'), ('css+erb', 'CSS+Ruby'), ('css+genshitext', 'CSS+Genshi Text'), ('css+lasso', 'CSS+Lasso'), ('css+mako', 'CSS+Mako'), ('css+mozpreproc', 'CSS+mozpreproc'), ('css+myghty', 'CSS+Myghty'), 
('css+php', 'CSS+PHP'), ('css+smarty', 'CSS+Smarty'), ('cucumber', 'Gherkin'), ('cuda', 'CUDA'), ('cypher', 'Cypher'), ('cython', 'Cython'), ('d', 'D'), ('d-objdump', 'd-objdump'), ('dart', 'Dart'), ('delphi', 'Delphi'), ('dg', 'dg'), ('diff', 'Diff'), ('django', 'Django/Jinja'), ('docker', 'Docker'), ('dpatch', 'Darcs Patch'), ('dtd', 'DTD'), ('duel', 'Duel'), ('dylan', 'Dylan'), ('dylan-console', 'Dylan session'), ('dylan-lid', 'DylanLID'), ('ebnf', 'EBNF'), ('ec', 'eC'), ('ecl', 'ECL'), ('eiffel', 'Eiffel'), ('elixir', 'Elixir'), ('erb', 'ERB'), ('erl', 'Erlang erl session'), ('erlang', 'Erlang'), ('evoque', 'Evoque'), ('factor', 'Factor'), ('fan', 'Fantom'), ('fancy', 'Fancy'), ('felix', 'Felix'), ('fortran', 'Fortran'), ('foxpro', 'FoxPro'), ('fsharp', 'FSharp'), ('gap', 'GAP'), ('gas', 'GAS'), ('genshi', 'Genshi'), ('genshitext', 'Genshi Text'), ('glsl', 'GLSL'), ('gnuplot', 'Gnuplot'), ('go', 'Go'), ('golo', 'Golo'), ('gooddata-cl', 'GoodData-CL'), ('gosu', 'Gosu'), ('groff', 'Groff'), ('groovy', 'Groovy'), ('gst', 'Gosu Template'), ('haml', 'Haml'), ('handlebars', 'Handlebars'), ('haskell', 'Haskell'), ('haxeml', 'Hxml'), ('html', 'HTML'), ('html+cheetah', 'HTML+Cheetah'), ('html+django', 'HTML+Django/Jinja'), ('html+evoque', 'HTML+Evoque'), ('html+genshi', 'HTML+Genshi'), ('html+handlebars', 'HTML+Handlebars'), ('html+lasso', 'HTML+Lasso'), ('html+mako', 'HTML+Mako'), ('html+myghty', 'HTML+Myghty'), ('html+php', 'HTML+PHP'), ('html+smarty', 'HTML+Smarty'), ('html+twig', 'HTML+Twig'), ('html+velocity', 'HTML+Velocity'), ('http', 'HTTP'), ('hx', 'Haxe'), ('hybris', 'Hybris'), ('hylang', 'Hy'), ('i6t', 'Inform 6 template'), ('idl', 'IDL'), ('idris', 'Idris'), ('iex', 'Elixir iex session'), ('igor', 'Igor'), ('inform6', 'Inform 6'), ('inform7', 'Inform 7'), ('ini', 'INI'), ('io', 'Io'), ('ioke', 'Ioke'), ('irc', 'IRC logs'), ('isabelle', 'Isabelle'), ('jade', 'Jade'), ('jags', 'JAGS'), ('jasmin', 'Jasmin'), ('java', 'Java'), ('javascript+mozpreproc', 
'Javascript+mozpreproc'), ('jlcon', 'Julia console'), ('js', 'JavaScript'), ('js+cheetah', 'JavaScript+Cheetah'), ('js+django', 'JavaScript+Django/Jinja'), ('js+erb', 'JavaScript+Ruby'), ('js+genshitext', 'JavaScript+Genshi Text'), ('js+lasso', 'JavaScript+Lasso'), ('js+mako', 'JavaScript+Mako'), ('js+myghty', 'JavaScript+Myghty'), ('js+php', 'JavaScript+PHP'), ('js+smarty', 'JavaScript+Smarty'), ('json', 'JSON'), ('jsonld', 'JSON-LD'), ('jsp', 'Java Server Page'), ('julia', 'Julia'), ('kal', 'Kal'), ('kconfig', 'Kconfig'), ('koka', 'Koka'), ('kotlin', 'Kotlin'), ('lagda', 'Literate Agda'), ('lasso', 'Lasso'), ('lcry', 'Literate Cryptol'), ('lean', 'Lean'), ('lhs', 'Literate Haskell'), ('lidr', 'Literate Idris'), ('lighty', 'Lighttpd configuration file'), ('limbo', 'Limbo'), ('liquid', 'liquid'), ('live-script', 'LiveScript'), ('llvm', 'LLVM'), ('logos', 'Logos'), ('logtalk', 'Logtalk'), ('lsl', 'LSL'), ('lua', 'Lua'), ('make', 'Makefile'), ('mako', 'Mako'), ('maql', 'MAQL'), ('mask', 'Mask'), ('mason', 'Mason'), ('mathematica', 'Mathematica'), ('matlab', 'Matlab'), ('matlabsession', 'Matlab session'), ('minid', 'MiniD'), ('modelica', 'Modelica'), ('modula2', 'Modula-2'), ('monkey', 'Monkey'), ('moocode', 'MOOCode'), ('moon', 'MoonScript'), ('mozhashpreproc', 'mozhashpreproc'), ('mozpercentpreproc', 'mozpercentpreproc'), ('mql', 'MQL'), ('mscgen', 'Mscgen'), ('mupad', 'MuPAD'), ('mxml', 'MXML'), ('myghty', 'Myghty'), ('mysql', 'MySQL'), ('nasm', 'NASM'), ('nemerle', 'Nemerle'), ('nesc', 'nesC'), ('newlisp', 'NewLisp'), ('newspeak', 'Newspeak'), ('nginx', 'Nginx configuration file'), ('nimrod', 'Nimrod'), ('nit', 'Nit'), ('nixos', 'Nix'), ('nsis', 'NSIS'), ('numpy', 'NumPy'), ('objdump', 'objdump'), ('objdump-nasm', 'objdump-nasm'), ('objective-c', 'Objective-C'), ('objective-c++', 'Objective-C++'), ('objective-j', 'Objective-J'), ('ocaml', 'OCaml'), ('octave', 'Octave'), ('ooc', 'Ooc'), ('opa', 'Opa'), ('openedge', 'OpenEdge ABL'), ('pan', 'Pan'), ('pawn', 'Pawn'), 
('perl', 'Perl'), ('perl6', 'Perl6'), ('php', 'PHP'), ('pig', 'Pig'), ('pike', 'Pike'), ('plpgsql', 'PL/pgSQL'), ('postgresql', 'PostgreSQL SQL dialect'), ('postscript', 'PostScript'), ('pot', 'Gettext Catalog'), ('pov', 'POVRay'), ('powershell', 'PowerShell'), ('prolog', 'Prolog'), ('properties', 'Properties'), ('protobuf', 'Protocol Buffer'), ('psql', 'PostgreSQL console (psql)'), ('puppet', 'Puppet'), ('py3tb', 'Python 3.0 Traceback'), ('pycon', 'Python console session'), ('pypylog', 'PyPy Log'), ('pytb', 'Python Traceback'), ('python', 'Python'), ('python3', 'Python 3'), ('qbasic', 'QBasic'), ('qml', 'QML'), ('racket', 'Racket'), ('ragel', 'Ragel'), ('ragel-c', 'Ragel in C Host'), ('ragel-cpp', 'Ragel in CPP Host'), ('ragel-d', 'Ragel in D Host'), ('ragel-em', 'Embedded Ragel'), ('ragel-java', 'Ragel in Java Host'), ('ragel-objc', 'Ragel in Objective C Host'), ('ragel-ruby', 'Ragel in Ruby Host'), ('raw', 'Raw token data'), ('rb', 'Ruby'), ('rbcon', 'Ruby irb session'), ('rconsole', 'RConsole'), ('rd', 'Rd'), ('rebol', 'REBOL'), ('red', 'Red'), ('redcode', 'Redcode'), ('registry', 'reg'), ('resource', 'ResourceBundle'), ('rexx', 'Rexx'), ('rhtml', 'RHTML'), ('robotframework', 'RobotFramework'), ('rql', 'RQL'), ('rsl', 'RSL'), ('rst', 'reStructuredText'), ('rust', 'Rust'), ('sass', 'Sass'), ('scala', 'Scala'), ('scaml', 'Scaml'), ('scheme', 'Scheme'), ('scilab', 'Scilab'), ('scss', 'SCSS'), ('shell-session', 'Shell Session'), ('slim', 'Slim'), ('smali', 'Smali'), ('smalltalk', 'Smalltalk'), ('smarty', 'Smarty'), ('sml', 'Standard ML'), ('snobol', 'Snobol'), ('sourceslist', 'Debian Sourcelist'), ('sp', 'SourcePawn'), ('sparql', 'SPARQL'), ('spec', 'RPMSpec'), ('splus', 'S'), ('sql', 'SQL'), ('sqlite3', 'sqlite3con'), ('squidconf', 'SquidConf'), ('ssp', 'Scalate Server Page'), ('stan', 'Stan'), ('swift', 'Swift'), ('swig', 'SWIG'), ('systemverilog', 'systemverilog'), ('tads3', 'TADS 3'), ('tcl', 'Tcl'), ('tcsh', 'Tcsh'), ('tea', 'Tea'), ('tex', 'TeX'), ('text', 
'Text only'), ('todotxt', 'Todotxt'), ('trac-wiki', 'MoinMoin/Trac Wiki markup'), ('treetop', 'Treetop'), ('ts', 'TypeScript'), ('twig', 'Twig'), ('urbiscript', 'UrbiScript'), ('vala', 'Vala'), ('vb.net', 'VB.net'), ('vctreestatus', 'VCTreeStatus'), ('velocity', 'Velocity'), ('verilog', 'verilog'), ('vgl', 'VGL'), ('vhdl', 'vhdl'), ('vim', 'VimL'), ('xml', 'XML'), ('xml+cheetah', 'XML+Cheetah'), ('xml+django', 'XML+Django/Jinja'), ('xml+erb', 'XML+Ruby'), ('xml+evoque', 'XML+Evoque'), ('xml+lasso', 'XML+Lasso'), ('xml+mako', 'XML+Mako'), ('xml+myghty', 'XML+Myghty'), ('xml+php', 'XML+PHP'), ('xml+smarty', 'XML+Smarty'), ('xml+velocity', 'XML+Velocity'), ('xquery', 'XQuery'), ('xslt', 'XSLT'), ('xtend', 'Xtend'), ('xul+mozpreproc', 'XUL+mozpreproc'), ('yaml', 'YAML'), ('yaml+jinja', 'YAML+Jinja'), ('zephir', 'Zephir')], default='python', max_length=100)),
('style', models.CharField(choices=[('autumn', 'autumn'), ('borland', 'borland'), ('bw', 'bw'), ('colorful', 'colorful'), ('default', 'default'), ('emacs', 'emacs'), ('friendly', 'friendly'), ('fruity', 'fruity'), ('igor', 'igor'), ('manni', 'manni'), ('monokai', 'monokai'), ('murphy', 'murphy'), ('native', 'native'), ('paraiso-dark', 'paraiso-dark'), ('paraiso-light', 'paraiso-light'), ('pastie', 'pastie'), ('perldoc', 'perldoc'), ('rrt', 'rrt'), ('tango', 'tango'), ('trac', 'trac'), ('vim', 'vim'), ('vs', 'vs'), ('xcode', 'xcode')], default='friendly', max_length=100)),
('highlighted', models.TextField()),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snippets', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('created',),
},
),
]
| mit |
rds0751/colinkers | env/Lib/encodings/cp1026.py | 272 | 13113 | """ Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions are single table lookups performed at
    # C level by the codecs charmap helpers. This file was generated by
    # gencodec.py, so keep edits to comments only.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding keeps no state between chunks, so `final` is
        # irrelevant; only the encoded bytes ([0]) are returned.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Single-byte charset: every chunk decodes independently, so no
        # partial-character state has to be carried over.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Charmap codecs need no stream-specific state; inherit everything.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Charmap codecs need no stream-specific state; inherit everything.
    pass
### encodings module API
def getregentry():
    # Entry point called by the `encodings` package search function to
    # register the 'cp1026' codec with its stateless, incremental and
    # stream variants.
    return codecs.CodecInfo(
        name='cp1026',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'{' # 0x48 -> LEFT CURLY BRACKET
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'[' # 0x68 -> LEFT SQUARE BRACKET
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
':' # 0x7A -> COLON
'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'}' # 0x8C -> RIGHT CURLY BRACKET
'`' # 0x8D -> GRAVE ACCENT
'\xa6' # 0x8E -> BROKEN BAR
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
']' # 0xAC -> RIGHT SQUARE BRACKET
'$' # 0xAD -> DOLLAR SIGN
'@' # 0xAE -> COMMERCIAL AT
'\xae' # 0xAF -> REGISTERED SIGN
'\xa2' # 0xB0 -> CENT SIGN
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'\xac' # 0xBA -> NOT SIGN
'|' # 0xBB -> VERTICAL LINE
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'~' # 0xCC -> TILDE
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\\' # 0xDC -> REVERSE SOLIDUS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'#' # 0xEC -> NUMBER SIGN
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'"' # 0xFC -> QUOTATION MARK
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| agpl-3.0 |
yvaucher/stock-logistics-workflow | __unported__/stock_location_flow_creator/stock_location_path_template.py | 33 | 1734 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher (Camptocamp)
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Template of stock location path object """
from openerp.osv.orm import Model
from openerp.addons.stock_orderpoint_creator.base_product_config_template import BaseProductConfigTemplate
class StockLocationPath(BaseProductConfigTemplate, Model):
    """Template counterpart of ``stock.location.path``.

    Stored in its own table so flow templates can be configured once and
    instantiated per product; instantiated records are removed on cleanup
    (``_clean_mode = 'unlink'``).
    """
    _name = 'stock.location.path.template'
    _inherit = 'stock.location.path'
    _table = 'stock_location_path_template'
    _clean_mode = 'unlink'

    def _get_ids_2_clean(self, cursor, uid, template_br, product_ids, context=None):
        """Hook selecting the model-specific records to clean.

        Returns the ids of ``stock.location.path`` records whose product is
        one of ``product_ids``.
        """
        model_obj = self._get_model()
        return model_obj.search(cursor, uid,
                                [('product_id', 'in', product_ids)])
| agpl-3.0 |
13W/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/MSVSProject.py | 137 | 7491 | #!/usr/bin/python2.4
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import common
import xml.dom
import xml.dom.minidom
import MSVSNew
#------------------------------------------------------------------------------
class Tool(object):
  """A Visual Studio tool: a named bag of configuration attributes."""

  def __init__(self, name, attrs=None):
    """Initializes the tool.

    Args:
      name: Tool name.
      attrs: Dict of tool attributes; may be None.
    """
    self.name = name
    self.attrs = attrs or {}

  def CreateElement(self, doc):
    """Creates an element for the tool.

    Args:
      doc: xml.dom.Document object to use for node creation.

    Returns:
      A new xml.dom.Element for the tool.
    """
    element = doc.createElement('Tool')
    element.setAttribute('Name', self.name)
    for attr_name, attr_value in self.attrs.items():
      element.setAttribute(attr_name, attr_value)
    return element
class Filter(object):
  """Visual Studio filter - that is, a virtual folder."""

  def __init__(self, name, contents=None):
    """Initializes the folder.

    Args:
      name: Filter (folder) name.
      contents: List of filenames and/or Filter objects contained.
    """
    self.name = name
    # Copy the incoming sequence so later caller-side mutation cannot leak in.
    self.contents = list(contents) if contents else []
#------------------------------------------------------------------------------
class Writer(object):
  """Visual Studio XML project writer.

  Builds a .vcproj DOM in memory (Create/Add* methods) and serializes it
  with Write(). Node attachment order matters to Visual Studio, so the
  sections are appended in the canonical order inside Create().
  """

  def __init__(self, project_path, version):
    """Initializes the project.

    Args:
      project_path: Path to the project file.
      version: Format version to emit (object exposing ProjectVersion()).
    """
    self.project_path = project_path
    self.doc = None
    self.version = version

  def Create(self, name, guid=None, platforms=None):
    """Creates the project document.

    Args:
      name: Name of the project.
      guid: GUID to use for project, if not None.
      platforms: List of platform names; defaults to ['Win32'].
    """
    self.name = name
    self.guid = guid or MSVSNew.MakeGuid(self.project_path)

    # Default to Win32 for platforms.
    if not platforms:
      platforms = ['Win32']

    # Create XML doc
    xml_impl = xml.dom.getDOMImplementation()
    self.doc = xml_impl.createDocument(None, 'VisualStudioProject', None)

    # Add attributes to root element
    self.n_root = self.doc.documentElement
    self.n_root.setAttribute('ProjectType', 'Visual C++')
    self.n_root.setAttribute('Version', self.version.ProjectVersion())
    self.n_root.setAttribute('Name', self.name)
    self.n_root.setAttribute('ProjectGUID', self.guid)
    self.n_root.setAttribute('RootNamespace', self.name)
    self.n_root.setAttribute('Keyword', 'Win32Proj')

    # Add platform list
    n_platform = self.doc.createElement('Platforms')
    self.n_root.appendChild(n_platform)
    for platform in platforms:
      n = self.doc.createElement('Platform')
      n.setAttribute('Name', platform)
      n_platform.appendChild(n)

    # Add tool files section
    self.n_tool_files = self.doc.createElement('ToolFiles')
    self.n_root.appendChild(self.n_tool_files)

    # Add configurations section
    self.n_configs = self.doc.createElement('Configurations')
    self.n_root.appendChild(self.n_configs)

    # Add empty References section
    self.n_root.appendChild(self.doc.createElement('References'))

    # Add files section
    self.n_files = self.doc.createElement('Files')
    self.n_root.appendChild(self.n_files)

    # Keep a dict keyed on filename to speed up access.
    self.n_files_dict = dict()

    # Add empty Globals section
    self.n_root.appendChild(self.doc.createElement('Globals'))

  def AddToolFile(self, path):
    """Adds a tool file to the project.

    Args:
      path: Relative path from project to tool file.
    """
    n_tool = self.doc.createElement('ToolFile')
    n_tool.setAttribute('RelativePath', path)
    self.n_tool_files.appendChild(n_tool)

  def _AddConfigToNode(self, parent, config_type, config_name, attrs=None,
                       tools=None):
    """Adds a configuration to the parent node.

    Args:
      parent: Destination node.
      config_type: Type of configuration node.
      config_name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    """
    # Handle defaults
    if not attrs:
      attrs = {}
    if not tools:
      tools = []

    # Add configuration node and its attributes
    n_config = self.doc.createElement(config_type)
    n_config.setAttribute('Name', config_name)
    for k, v in attrs.items():
      n_config.setAttribute(k, v)
    parent.appendChild(n_config)

    # Add tool nodes and their attributes
    if tools:
      for t in tools:
        # Bare strings are wrapped into attribute-less Tool elements.
        if isinstance(t, Tool):
          n_config.appendChild(t.CreateElement(self.doc))
        else:
          n_config.appendChild(Tool(t).CreateElement(self.doc))

  def AddConfig(self, name, attrs=None, tools=None):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    """
    self._AddConfigToNode(self.n_configs, 'Configuration', name, attrs, tools)

  def _AddFilesToNode(self, parent, files):
    """Adds files and/or filters to the parent node.

    Args:
      parent: Destination node
      files: A list of Filter objects and/or relative paths to files.

    Will call itself recursively, if the files list contains Filter objects.
    """
    for f in files:
      if isinstance(f, Filter):
        node = self.doc.createElement('Filter')
        node.setAttribute('Name', f.name)
        self._AddFilesToNode(node, f.contents)
      else:
        node = self.doc.createElement('File')
        node.setAttribute('RelativePath', f)
        # Remember the node so AddFileConfig can find it by path later.
        self.n_files_dict[f] = node
      parent.appendChild(node)

  def AddFiles(self, files):
    """Adds files to the project.

    Args:
      files: A list of Filter objects and/or relative paths to files.

    This makes a copy of the file/filter tree at the time of this call.  If you
    later add files to a Filter object which was passed into a previous call
    to AddFiles(), it will not be reflected in this project.
    """
    self._AddFilesToNode(self.n_files, files)
    # TODO(rspangler) This also doesn't handle adding files to an existing
    # filter.  That is, it doesn't merge the trees.

  def AddFileConfig(self, path, config, attrs=None, tools=None):
    """Adds a configuration to a file.

    Args:
      path: Relative path to the file.
      config: Name of configuration to add.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.

    Raises:
      ValueError: Relative path does not match any file added via AddFiles().
    """
    # Find the file node with the right relative path
    parent = self.n_files_dict.get(path)
    if not parent:
      raise ValueError('AddFileConfig: file "%s" not in project.' % path)

    # Add the config to the file node
    self._AddConfigToNode(parent, 'FileConfiguration', config, attrs, tools)

  def Write(self, writer=common.WriteOnDiff):
    """Writes the project file."""
    f = writer(self.project_path)
    # Windows-1252 + CRLF matches what Visual Studio itself emits.
    self.doc.writexml(f, encoding='Windows-1252', addindent='  ', newl='\r\n')
    f.close()
#------------------------------------------------------------------------------
| bsd-3-clause |
little-dude/monolithe | setup.py | 1 | 2205 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages
def _read_requirements(path="requirements.txt"):
    """Return the dependency specifiers listed in *path*.

    Replaces the previous inline ``[line for line in open(...)]`` which
    leaked the file handle and handed setuptools raw lines (with trailing
    newlines, blank lines and comments). The file is now closed via a
    context manager and each entry is stripped; blank and comment lines
    are skipped.
    """
    with open(path) as req_file:
        return [
            line.strip()
            for line in req_file
            if line.strip() and not line.lstrip().startswith("#")
        ]


setup(
    name="monolithe",
    packages=find_packages(exclude=["*tests*"]),
    include_package_data=True,
    version="1.1.8",
    description="Monolithe is a sdk generator",
    author="Christophe Serafin, Antoine Mercadal",
    author_email="christophe.serafin@nuagenetworks.net, antoine@nuagenetworks.net",
    url="https://github.com/nuagenetworks/monolithe",
    classifiers=[],
    install_requires=_read_requirements(),
    entry_points={
        "console_scripts": [
            "monogen = monolithe.cli:main"
        ]
    }
)
| bsd-3-clause |
MostlyOpen/odoo_addons | myo_summary/models/summary.py | 1 | 2051 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from datetime import datetime
from openerp import api, fields, models
class Summary(models.Model):
    # Generic summary record: identified by a unique code, owned by a user,
    # carrying free-form notes and a soft-delete flag.
    _name = "myo.summary"

    @api.multi
    @api.depends('name', 'code')
    def name_get(self):
        """Return display names formatted as ``Name [Code]``."""
        # NOTE(review): @api.depends is normally meant for computed fields;
        # it looks redundant on name_get -- confirm before removing.
        result = []
        for record in self:
            result.append(
                (record.id,
                 u'%s [%s]' % (record.name, record.code)
                 ))
        return result

    # Human-readable subject of the summary.
    name = fields.Char('Summary for', required=True, help="Summary Name")
    # Unique identifier (enforced by the SQL constraint below).
    code = fields.Char('Summary Code', help="Summary Code")
    # Timestamp assigned when the record is created (lambda evaluated per record).
    date_summary = fields.Datetime(
        'Summary Date',
        default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    user_id = fields.Many2one('res.users', 'Summary Responsible', required=False, readonly=False)
    notes = fields.Text(string='Notes')
    # Soft-delete flag: unchecked summaries are hidden, not removed.
    active = fields.Boolean(
        'Active',
        help="If unchecked, it will allow you to hide the summary without removing it.",
        default=1
    )

    _sql_constraints = [
        (
            'code_uniq',
            'UNIQUE (code)',
            'Error! The Code must be unique!'
        ),
    ]

    _order = 'name'
| agpl-3.0 |
X-DataInitiative/tick | tick/base/base.py | 2 | 20133 | # License: BSD 3 clause
# import warnings
import os
import inspect
from datetime import datetime
from abc import ABCMeta
import json
import pydoc
import numpy as np
import numpydoc as nd
from numpydoc import docscrape
import copy
# The metaclass inherits from ABCMeta and not type, since we'd like to
# do abstract classes in tick that inherits from ABC
# TODO: readonly attributes cannot be linked to c++ setters
class BaseMeta(ABCMeta):
# Default behaviour of an attribute is writable, with no C++ setter
default_attrinfo = {"writable": True, "cpp_setter": None}
default_classinfo = {
'is_prop': False,
'in_doc': False,
'doc': [],
'in_init': False
}
@staticmethod
def hidden_attr(attr_name):
return '__' + attr_name
    @staticmethod
    def set_cpp_attribute(self, val, cpp_setter):
        """ Set the linked cpp attribute if possible

        Parameters
        ----------
        self : `object`
            the instance

        val : `object`
            the value to be set

        cpp_setter : `str`
            name of the setter method on the embedded C++ object
            (the code resolves it with getattr, so it is a name, not a
            callable)
        """
        # First we get the C++ object from its name (its name is an attribute
        # in the class)
        if not hasattr(self, "_cpp_obj_name"):
            raise NameError("_cpp_obj_name must be set as class attribute to "
                            "use automatic C++ setters")

        # Retrieve C++ associated object if it has been instantiated
        cpp_obj = None
        if hasattr(self, self._cpp_obj_name):
            cpp_obj = getattr(self, self._cpp_obj_name)

        # If the cpp_obj is instantiated, we update it; otherwise the value
        # only lives on the Python side until the C++ object is created.
        if cpp_obj is not None:
            # Get the setter for this attribute in the C++
            if not hasattr(cpp_obj, cpp_setter):
                raise NameError("%s is not a method of %s" %
                                (cpp_setter, cpp_obj.__class__))
            cpp_obj_setter = getattr(cpp_obj, cpp_setter)
            cpp_obj_setter(val)
    @staticmethod
    def detect_if_called_in_init(self):
        """This function examine stacktrace in order to determine if it has
        been called from the __init__ function of the given instance

        Parameters
        ----------
        self : `object`
            The instance

        Returns
        -------
        set_in_init : `bool`
            True if this function was called by __init__
        """
        # It is forbidden to set a readonly (non writable) attribute
        # expect from __init__ function of the class
        set_in_init = False
        # trace contains information of the current execution
        # environment
        trace = inspect.currentframe()

        while trace is not None:
            # We retrieve the name of the executor (for example the
            # function that launched this command)
            exec_name = trace.f_code.co_name

            # We inspect the local variables
            if 'self' in trace.f_locals:
                local_self = trace.f_locals['self']
            else:
                local_self = None

            # We check if environment corresponds to our instance's
            # __init__ (same function name AND same instance bound to
            # that frame's 'self')
            if exec_name == '__init__' and local_self == self:
                set_in_init = True
                break

            # If this frame was not the good one, we try the previous
            # one, the one that has launched it
            # If there is no previous one, `None` will be returned
            trace = trace.f_back

        return set_in_init
    @staticmethod
    def build_property(class_name, attrs, attr_name, writable, cpp_setter):
        """
        Builds a property

        Parameters
        ----------
        class_name : `str`
            Name of the class

        attrs : `dict`
            The attributes of the class
            (NOTE(review): currently unused in this method's body)

        attr_name : `str`
            Name of the attribute for which we build a property

        writable : `bool`
            If True, the attribute can be changed by the user. If not,
            then an error will be raised when trying to change it
            outside of __init__.

        cpp_setter : `str` or `None`
            Name of the setter in the c++ object embedded in the class
            for the attribute

        Returns
        -------
        output : property
            The required property
        """
        hidden_name = BaseMeta.hidden_attr(attr_name)

        def getter(self):
            # if it was not assigned yet we raise the correct error message
            if not hasattr(self, hidden_name):
                raise AttributeError("'%s' object has no attribute '%s'" %
                                     (class_name, attr_name))
            # Get the attribute
            return object.__getattribute__(self, hidden_name)

        def create_base_setter():
            if cpp_setter is None:
                # There is no C++ setter, we just set the attribute
                def setter(self, val):
                    object.__setattr__(self, hidden_name, val)
            else:
                # There is a C++ setter to apply
                def setter(self, val):
                    object.__setattr__(self, hidden_name, val)
                    # We update the C++ object embedded in the class
                    # as well.
                    BaseMeta.set_cpp_attribute(self, val, cpp_setter)

            return setter

        base_setter = create_base_setter()

        if writable:
            setter = base_setter
        else:
            # If it is not writable we wrap the base setter with something
            # that detect if attribute setting was called in __init__
            def setter(self, val):
                set_in_init = Base.detect_if_called_in_init(self)
                # If and only if this was launched from our instance's __init__
                # we allow user to set the attribute
                if set_in_init:
                    base_setter(self, val)
                else:
                    raise AttributeError(
                        "%s is readonly in %s" % (str(attr_name), class_name))

        def deletter(self):
            raise AttributeError(
                "can't delete %s in %s" % (str(attr_name), class_name))

        # We set doc to None otherwise it will interfere with
        # the docstring of the class.
        # This is very useful as we can have bugs with sphinx
        # otherwise (conflict between an attribute's docstring
        # and a docstring of a property with the same name).
        # All attributes are actually properties when the base
        # class is Base.
        # The docstring of all properties are then putted back
        # in the __init__ of the Base class below.
        prop = property(getter, setter, deletter, None)

        return prop
@staticmethod
def create_property_doc(class_name, attr_doc):
"""Create doc that will be attached to property
Parameters
----------
class_name : `str`
Name of the class the property comes from
attr_doc : `list`
List output by numpydoc contained parsed documentation
Returns
-------
The formatted doc
"""
attr_type = attr_doc[1]
attr_docstring = [
line for line in attr_doc[2] if len(line.strip()) > 0
]
attr_from = 'from %s' % class_name
doc = [attr_type] + attr_docstring + [attr_from]
return doc
@staticmethod
def find_init_params(attrs):
"""Find the parameters passed to the class's __init__
"""
ignore = ['self', 'args', 'kwargs']
# if class has no __init__ method
if "__init__" not in attrs:
return []
return [
key
for key in inspect.signature(attrs["__init__"]).parameters.keys()
if key not in ignore
]
@staticmethod
def find_properties(attrs):
"""Find all native properties of the class
"""
return [
attr_name for attr_name, value in attrs.items()
if isinstance(value, property)
]
@staticmethod
def find_documented_attributes(class_name, attrs):
"""Parse the documentation to retrieve all attributes that have been
documented and their documentation
"""
# If a class is not documented we return an empty list
if '__doc__' not in attrs:
return []
current_class_doc = inspect.cleandoc(attrs['__doc__'])
parsed_doc = docscrape.ClassDoc(None, doc=current_class_doc)
attr_docs = parsed_doc['Parameters'] + parsed_doc['Attributes'] + \
parsed_doc['Other Parameters']
attr_and_doc = []
create_property_doc = BaseMeta.create_property_doc
for attr_doc in attr_docs:
attr_name = attr_doc[0]
if ':' in attr_name:
raise ValueError("Attribute '%s' has not a proper "
"documentation, a space might be missing "
"before colon" % attr_name)
attr_and_doc += [(attr_name,
create_property_doc(class_name, attr_doc))]
return attr_and_doc
@staticmethod
def extract_attrinfos(class_name, attrs):
"""Inspect class attrs to create aggregate all attributes info of the
current class
In practice, we inspect documented attributes, properties,
parameters given to __init__ function and finally what user has
filled in _attrinfos
Parameters
----------
class_name : `str`
The name of the class (needed to create associated doc)
atts : `dict`
Dictionary of all futures attributes of the class
Returns
-------
current_attrinfos : `dict`
Subdict of the global classinfos dict concerning the attributes
of the current class.
"""
current_attrinfos = {}
# First we look at all documented attributes
for attr_name, attr_doc in \
BaseMeta.find_documented_attributes(class_name, attrs):
current_attrinfos.setdefault(attr_name, {})
current_attrinfos[attr_name]['in_doc'] = True
current_attrinfos[attr_name]['doc'] = attr_doc
# Second we look all native properties
for attr_name in BaseMeta.find_properties(attrs):
current_attrinfos.setdefault(attr_name, {})
current_attrinfos[attr_name]['is_prop'] = True
# Third we look at parameters given to __init__
for attr_name in BaseMeta.find_init_params(attrs):
current_attrinfos.setdefault(attr_name, {})
current_attrinfos[attr_name]['in_init'] = True
# Finally we use _attrinfos provided dictionary
attrinfos = attrs.get("_attrinfos", {})
for attr_name in attrinfos.keys():
# Check that no unexpected key appears
for key in attrinfos[attr_name].keys():
if key not in BaseMeta.default_attrinfo:
raise ValueError('_attrinfos does not handle key %s' % key)
# Add behavior specified in attrinfo
current_attrinfos.setdefault(attr_name, {})
current_attrinfos[attr_name].update(attrinfos[attr_name])
return current_attrinfos
@staticmethod
def inherited_classinfos(bases):
"""Looks at all classinfos dictionary of bases class and merge them to
create the initial classinfos dictionary
Parameters
----------
bases : `list`
All the bases of the class
Returns
-------
The initial classinfos dictionary
Notes
-----
index corresponds to the distance in terms of inheritance.
The highest index (in terms of inheritance) at
which this class has been seen. We take the highest in
case of multiple inheritance. If we have the following :
A0
/ \
A1 B1
| |
A2 |
\/
A3
We want index of A0 to be higher than index of A1 which
inherits from A0.
In this example:
* A3 has index 0
* A2 and B1 have index 1
* A1 has index 2
* A0 has index 3 (even if it could be 2 through B1)
"""
classinfos = {}
for base in bases:
if hasattr(base, "_classinfos"):
for cls_key in base._classinfos:
base_infos = base._classinfos[cls_key]
if cls_key in classinfos:
current_info = classinfos[cls_key]
current_info['index'] = max(current_info['index'],
base_infos['index'] + 1)
else:
classinfos[cls_key] = {}
classinfos[cls_key]['index'] = base_infos['index'] + 1
classinfos[cls_key]['attr'] = base_infos['attr']
return classinfos
@staticmethod
def create_attrinfos(classinfos):
"""Browse all class in classinfos dict to create a final attrinfo dict
Parameters
----------
classinfos : `dict`
The final classinfos dict
Returns
-------
attrinfos : `dict`
Dictionary in which key is an attribute name and value is a dict
with all its information.
"""
# We sort the doc reversely by index, in order to see the
# furthest classes first (and potentially override infos of
# parents for an attribute if two classes document it)
attrinfos = {}
for cls_key, info_index in sorted(classinfos.items(),
key=lambda item: item[1]['index'],
reverse=True):
classinfos = info_index['attr']
for attr_name in classinfos:
attrinfos.setdefault(attr_name, {})
attrinfos[attr_name].update(classinfos[attr_name])
return attrinfos
    def __new__(mcs, class_name, bases, attrs):
        """Create the class: gather attribute metadata across the whole
        inheritance tree, replace plain attributes with guarded properties,
        and install ``__setattr__`` / ``_set`` that enforce them.
        """
        # Initialize classinfos dictionary with the merged classinfos
        # dictionaries of all bases
        classinfos = BaseMeta.inherited_classinfos(bases)

        # Inspect current class to get information about its attributes.
        # cls_key is a unique hashable identifier for the class
        cls_key = '%s.%s' % (attrs['__module__'], attrs['__qualname__'])
        classinfos[cls_key] = {'index': 0}
        extract_attrinfos = BaseMeta.extract_attrinfos
        classinfos[cls_key]['attr'] = extract_attrinfos(class_name, attrs)

        # Once we have collected all classinfos we can extract from it all
        # attributes information
        attrinfos = BaseMeta.create_attrinfos(classinfos)

        # Stored on the class so subclasses can inherit and extend them.
        attrs["_classinfos"] = classinfos
        attrs["_attrinfos"] = attrinfos

        build_property = BaseMeta.build_property

        # Create properties for all attributes described in attrinfos if they
        # are not already a property; this allows us to set a special behavior
        # (read-only enforcement, C++ setter propagation).
        for attr_name, info in attrinfos.items():
            attr_is_property = attrinfos[attr_name].get('is_prop', False)

            # We create the corresponding property if our item is not a property
            if not attr_is_property:
                writable = info.get("writable",
                                    BaseMeta.default_attrinfo["writable"])

                cpp_setter = info.get("cpp_setter",
                                      BaseMeta.default_attrinfo["cpp_setter"])

                attrs[attr_name] = build_property(class_name, attrs, attr_name,
                                                  writable, cpp_setter)

        # Add a __setattr__ method that forbids setting a non-existing
        # attribute. Note: attrinfos is captured by closure at class
        # creation time.
        def __setattr__(self, key, val):
            if key in attrinfos:
                object.__setattr__(self, key, val)
            else:
                raise AttributeError("'%s' object has no settable attribute "
                                     "'%s'" % (class_name, key))

        attrs["__setattr__"] = __setattr__

        # Add a method allowing to force set an attribute, bypassing the
        # read-only guard installed by build_property.
        def _set(self, key: str, val):
            """A method allowing to force set an attribute
            """
            if not isinstance(key, str):
                raise ValueError(
                    'In _set function you must pass key as string')

            if key not in attrinfos:
                raise AttributeError("'%s' object has no settable attribute "
                                     "'%s'" % (class_name, key))

            # Write straight to the hidden slot backing the property.
            object.__setattr__(self, BaseMeta.hidden_attr(key), val)

            # Propagate the value to the embedded C++ object when a setter
            # was declared for this attribute.
            cpp_setter = self._attrinfos[key].get(
                "cpp_setter", BaseMeta.default_attrinfo["cpp_setter"])
            if cpp_setter is not None:
                BaseMeta.set_cpp_attribute(self, val, cpp_setter)

        attrs["_set"] = _set

        return ABCMeta.__new__(mcs, class_name, bases, attrs)
    def __init__(cls, class_name, bases, attrs):
        # Delegate class initialization to ABCMeta unchanged; all the
        # customization happens in __new__.
        return ABCMeta.__init__(cls, class_name, bases, attrs)
class Base(metaclass=BaseMeta):
    """The BaseClass of the tick project. This relies on some dark
    magic based on a metaclass. The aim is to have read-only attributes,
    docstring for all parameters, and some other nasty features

    Attributes
    ----------
    name : str (read-only)
        Name of the class
    """

    # NOTE(review): the class docstring above is parsed by BaseMeta (via
    # numpydoc) to discover documented attributes -- keep its numpydoc
    # layout intact when editing it.

    _attrinfos = {
        "name": {
            "writable": False
        },
    }

    def __init__(self, *args, **kwargs):
        """Record the class name and copy each attribute's parsed doc onto
        its generated property.

        ``*args`` / ``**kwargs`` are accepted for cooperative multiple
        inheritance and otherwise ignored here.
        """
        # We add the name of the class
        self._set("name", self.__class__.__name__)

        for attr_name, prop in self.__class__.__dict__.items():
            if isinstance(prop, property):
                if attr_name in self._attrinfos and len(
                        self._attrinfos[attr_name].get('doc', [])) > 0:
                    # we create the property documentation based on what we
                    # have found in the docstring.
                    # First we will have the type of the property, then the
                    # documentation and finally the closest class (in terms
                    # of inheritance) in which it is documented
                    # Note: We join doc with '-' instead of '\n'
                    # because multiline doc does not print well in iPython
                    prop_doc = self._attrinfos[attr_name]['doc']
                    prop_doc = ' - '.join([
                        str(d).strip() for d in prop_doc
                        if len(str(d).strip()) > 0
                    ])

                    # We copy property and add the doc found in docstring
                    setattr(
                        self.__class__, attr_name,
                        property(prop.fget, prop.fset, prop.fdel, prop_doc))

    @staticmethod
    def _get_now():
        # Timestamp string safe for filenames,
        # e.g. "2020-01-31_23-59-59-123456"
        return datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')

    def _as_dict(self):
        """Return a dict of the public attributes (used by ``__str__``)."""
        dd = {}
        for key in self._attrinfos.keys():
            # private and protected attributes are not shown in the
            # dict
            if not key.startswith("_"):
                dd[key] = getattr(self, key)
        return dd

    def _inc_attr(self, key: str, step: int = 1):
        """Increment an attribute of the class by ``step``

        Parameters
        ----------
        key : `str`
            Name of the class's attribute

        step : `int`
            Size of the increase
        """
        self._set(key, getattr(self, key) + step)

    def __str__(self):
        # JSON dump of the public attributes; numpy dtypes are replaced by
        # their name since they are not JSON-serializable.
        dic = self._as_dict()
        if 'dtype' in dic and isinstance(dic['dtype'], np.dtype):
            dic['dtype'] = dic['dtype'].name
        return json.dumps(dic, sort_keys=True, indent=2)
| bsd-3-clause |
Censio/filterpy | filterpy/kalman/tests/test_sensor_fusion.py | 1 | 2585 | # -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy.random as random
from numpy.random import randn
import numpy as np
from filterpy.kalman import KalmanFilter
from numpy import array, asarray
# Fixed RNG seed shared by both tests so their noise sequences are reproducible.
SEED = 1124
def single_measurement_test():
    """Run a constant-velocity Kalman filter on a single noisy position
    sensor tracking a target moving at one unit per step, print and return
    the standard deviation of the estimation residuals.
    """
    dt = 0.1
    sigma = 2.

    # 2-state (position, velocity) filter observing position only.
    tracker = KalmanFilter(dim_x=2, dim_z=1)
    tracker.F = array([[1., dt], [0., 1.]])
    tracker.H = array([[1., 0.]])
    tracker.x = array([[0.], [1.]])
    tracker.Q = array([[dt**3 / 3, dt**2 / 2],
                       [dt**2 / 2, dt]]) * 0.02
    tracker.P *= 100
    tracker.R[0, 0] = sigma**2

    random.seed(SEED)
    estimates = []
    measurements = []
    truths = []
    for step in range(1, 100):
        # True position is simply the step index; add sensor noise.
        reading = step + randn() * sigma
        z = array([[reading]])

        tracker.predict()
        tracker.update(z)

        estimates.append(tracker.x.T[0])
        measurements.append(z.T[0])
        truths.append(step)

    estimates = asarray(estimates)
    truths = asarray(truths)

    residuals = truths - estimates[:, 0]
    std_dev = np.std(residuals)
    print('std: {:.3f}'.format(std_dev))
    return std_dev
def sensor_fusion_test(wheel_sigma=2., gps_sigma=4.):
    """Fuse two position sensors (wheel encoder + GPS) in one Kalman
    filter, print diagnostics, and return the residual standard deviation.

    Parameters
    ----------
    wheel_sigma : float
        Noise standard deviation of the wheel sensor.
    gps_sigma : float
        Noise standard deviation of the GPS sensor; absurdly large values
        (> 1e40) effectively disable the GPS channel.
    """
    dt = 0.1

    # 2-state filter with two independent position measurements.
    fused = KalmanFilter(dim_x=2, dim_z=2)
    fused.F = array([[1., dt], [0., 1.]])
    fused.H = array([[1., 0.], [1., 0.]])
    fused.x = array([[0.], [0.]])
    fused.Q = array([[dt**3 / 3, dt**2 / 2],
                     [dt**2 / 2, dt]]) * 0.02
    fused.P *= 100
    fused.R[0, 0] = wheel_sigma**2
    fused.R[1, 1] = gps_sigma**2

    random.seed(SEED)
    estimates = []
    measurements = []
    truths = []
    for step in range(1, 100):
        wheel_reading = step + randn() * wheel_sigma
        gps_reading = step + randn() * gps_sigma
        # Pin the GPS measurement when its sigma is huge so the
        # arithmetic stays finite while the channel is effectively ignored.
        if gps_sigma > 1e40:
            gps_reading = -1e40
        z = array([[wheel_reading], [gps_reading]])

        fused.predict()
        fused.update(z)

        estimates.append(fused.x.T[0])
        measurements.append(z.T[0])
        truths.append(step)

    estimates = asarray(estimates)
    truths = asarray(truths)

    res = truths - estimates[:, 0]
    std_dev = np.std(res)
    print('fusion std: {:.3f}'.format(std_dev))
    print(fused.Q)
    print(fused.K)
    return std_dev
def test_fusion():
    """The fused two-sensor estimate must beat the single-sensor one."""
    fused_std = sensor_fusion_test()
    single_std = single_measurement_test()
    assert fused_std < single_std
if __name__ == "__main__":
    # Manual run: a huge GPS sigma (4e100) effectively disables the GPS
    # channel, so the fused filter degenerates to the wheel sensor alone.
    sensor_fusion_test(2,4e100)
    single_measurement_test()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.